code                stringlengths    13 to 1.2M
order_type          stringclasses    1 value
original_example    dict
step_ids            listlengths      1 to 5
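The rows that follow all share this schema: each record carries the full source text, a single order_type class, a dict of step-wise masked variants of the same code, and a list of step ids. As a minimal sketch of how one such record could be inspected (the JSON Lines layout and the file name rows.jsonl are assumptions for illustration; only the four field names come from the schema above):

import json

# Assumption: one JSON object per line in "rows.jsonl"; the dump itself does not name a file.
with open("rows.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        code = row["code"]                 # full source text (13 to 1.2M characters per the schema)
        order_type = row["order_type"]     # a single string class; "normal" in every row shown below
        example = row["original_example"]  # dict: blob_id, index, step-1 .. step-N, step-ids
        step_ids = row["step_ids"]         # list of 1 to 5 integers
        # The step-N entries hold progressively less masked versions of the same
        # source, the last one matching the unmasked code column.
        for key in sorted(k for k in example if k.startswith("step-") and k != "step-ids"):
            print(key, len(example[key]), "characters")
        break  # inspect only the first record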
from xai.brain.wordbase.nouns._teleconference import _TELECONFERENCE

# class header
class _TELECONFERENCES(_TELECONFERENCE, ):
    def __init__(self,):
        _TELECONFERENCE.__init__(self)
        self.name = "TELECONFERENCES"
        self.specie = 'nouns'
        self.basic = "teleconference"
        self.jsondata = {}
normal
{ "blob_id": "9021fa440561461ee179f333aa04a155d06c6e86", "index": 7255, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass _TELECONFERENCES(_TELECONFERENCE):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass _TELECONFERENCES(_TELECONFERENCE):\n\n def __init__(self):\n _TELECONFERENCE.__init__(self)\n self.name = 'TELECONFERENCES'\n self.specie = 'nouns'\n self.basic = 'teleconference'\n self.jsondata = {}\n", "step-4": "from xai.brain.wordbase.nouns._teleconference import _TELECONFERENCE\n\n\nclass _TELECONFERENCES(_TELECONFERENCE):\n\n def __init__(self):\n _TELECONFERENCE.__init__(self)\n self.name = 'TELECONFERENCES'\n self.specie = 'nouns'\n self.basic = 'teleconference'\n self.jsondata = {}\n", "step-5": "\n\nfrom xai.brain.wordbase.nouns._teleconference import _TELECONFERENCE\n\n#calss header\nclass _TELECONFERENCES(_TELECONFERENCE, ):\n\tdef __init__(self,): \n\t\t_TELECONFERENCE.__init__(self)\n\t\tself.name = \"TELECONFERENCES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"teleconference\"\n\t\tself.jsondata = {}\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import time
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)

POWER_PIN = 21
SPICLK = 18
SPIMISO = 23
SPIMOSI = 24
SPICS = 25

PAUSE = 0.1

# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    if ((adcnum > 7) or (adcnum < 0)):
        return -1
    GPIO.output(cspin, True)

    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)     # bring CS low

    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3    # we only need to send 5 bits here
    for i in range(5):
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)

    adcout = 0
    # read in one empty bit, one null bit and 10 ADC bits
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1

    GPIO.output(cspin, True)

    adcout >>= 1  # first bit is 'null' so drop it
    return adcout


def spi_setup():
    GPIO.setup(SPIMOSI, GPIO.OUT)
    GPIO.setup(SPIMISO, GPIO.IN)
    GPIO.setup(SPICLK, GPIO.OUT)
    GPIO.setup(SPICS, GPIO.OUT)
    GPIO.setup(POWER_PIN, GPIO.OUT)


def spi_readout(adc_pin):
    # read the analog pin
    return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)


def power_on():
    GPIO.output(POWER_PIN, True)


def power_off():
    GPIO.output(POWER_PIN, False)


def adc_to_temp(readout):
    millivolts = readout * (3300.0 / 1024.0)
    temp_c = ((millivolts - 100.0) / 10.0) - 40.0
    return temp_c


if __name__ == "__main__":
    HYGROMETER = 0
    TEMP = 1
    LIGHT = 2
    spi_setup()
    power_on()
    time.sleep(PAUSE)
    print("Hygrometer value %d" % spi_readout(HYGROMETER))
    power_off()
    time.sleep(PAUSE)
    temp = adc_to_temp(spi_readout(TEMP))
    print("Temp sensor: %.1f C" % temp)
    time.sleep(PAUSE)
    light_level = (float(spi_readout(LIGHT)) / 1024.0) * 100.0
    print("Light level {}% ".format(light_level))
    GPIO.cleanup()
normal
{ "blob_id": "fcdb43e36a4610ca0201a27d82b1a583f1482878", "index": 8924, "step-1": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\n<mask token>\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\n<mask token>\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return 
temp_c\n\n\n<mask token>\n", "step-4": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\nif __name__ == '__main__':\n HYGROMETER = 0\n TEMP = 1\n LIGHT = 2\n spi_setup()\n power_on()\n time.sleep(PAUSE)\n print('Hygrometer value %d' % spi_readout(HYGROMETER))\n power_off()\n time.sleep(PAUSE)\n temp = adc_to_temp(spi_readout(TEMP))\n print('Temp sensor: %.1f C' % temp)\n time.sleep(PAUSE)\n light_level = float(spi_readout(LIGHT)) / 1024.0 * 100.0\n print('Light level {}% '.format(light_level))\n GPIO.cleanup()\n", "step-5": "import time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\nPOWER_PIN = 21\nSPICLK = 18\nSPIMISO = 23\nSPIMOSI = 24\nSPICS = 25\n\nPAUSE = 0.1\n\n# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if ((adcnum > 7) or (adcnum < 0)):\n return -1\n GPIO.output(cspin, True)\n\n GPIO.output(clockpin, False) # start clock low\n GPIO.output(cspin, False) # bring CS low\n\n commandout = adcnum\n commandout |= 0x18 # start bit + single-ended bit\n commandout <<= 3 # we only need to send 5 bits here\n for i in range(5):\n if (commandout & 0x80):\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n\n adcout = 0\n # read in one empty bit, one null bit and 10 ADC bits\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if (GPIO.input(misopin)):\n adcout |= 0x1\n\n GPIO.output(cspin, True)\n\n adcout >>= 1 # first bit is 'null' so drop it\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n # read the analog pin\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = ((millivolts - 100.0) / 10.0) - 40.0\n return temp_c\n\nif __name__ == \"__main__\":\n HYGROMETER = 0\n TEMP = 1\n LIGHT = 2\n spi_setup()\n power_on()\n time.sleep(PAUSE)\n print(\"Hygrometer value %d\" % spi_readout(HYGROMETER))\n power_off()\n time.sleep(PAUSE)\n temp = 
adc_to_temp(spi_readout(TEMP))\n print(\"Temp sensor: %.1f C\" % temp)\n time.sleep(PAUSE)\n light_level = (float(spi_readout(LIGHT))/1024.0) * 100.0\n print(\"Light level {}% \".format(light_level))\n GPIO.cleanup()\n", "step-ids": [ 4, 5, 6, 7, 10 ] }
[ 4, 5, 6, 7, 10 ]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
This program is run at regular intervals to check the battery charge status of the uninterruptible power supply.
In our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the
Raspberry PI shutdown procedure at 3.7 V, we ensure that the processor has enough time to make a clean shutdown.

This program must be launched at regular intervals (5 minutes in our case) by the Raspberry PI OS cron task scheduler.
The crontab -e command in the home directory opens the cron file, and the command line for a trigger every 5 minutes would, for example, be:
5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py
"""

import time
import datetime as dt

from subprocess import call
from pidcmes_lib import Pidcmes  # class for 'pidcmes' procedures

pidcmes = Pidcmes()  # initialize pidcmes class

u_bat_min = 3.7   # minimum battery voltage
n_moy = 20        # averaging to reduce glitches
stop_run = False  # to control the execution (run/stop)

u_avg = pidcmes.get_tension(n_moy)  # read the value in volts

if u_avg < u_bat_min:  # or i > 10:
    print("proper shut down of the machine due to low battery")
    # time.sleep(5)
    # call("sudo shutdown -h now", shell=True)  # shutdown the RASPI
else:
    print("tout va bien dormez braves gens")
normal
{ "blob_id": "67b967b688aeac1270eee836e0f6e6b3555b933e", "index": 5, "step-1": "<mask token>\n", "step-2": "<mask token>\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "step-3": "<mask token>\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "step-4": "<mask token>\nimport time\nimport datetime as dt\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis program is run at regular intervals to check the battery charge status of the uninterruptible power supply.\nIn our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the\nRaspberry PI shutdown procedure at 3.7 V,we ensure that the processor has enough time to make a clean shutdown.\n\nThis program must be launched at regular intervals (5 inute in our case) by the Raspberry PI OS cron task scheduler.\nThe crontab -e command in the home directory opens the cron file and the command line would for example be for a trigger every 5 minutes:\n5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py\n\"\"\"\n\nimport time\nimport datetime as dt\n\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes # class for 'pidcmes' procedures\n \npidcmes = Pidcmes() # initialize pidcmese class\n\nu_bat_min = 3.7 # minumum battery voltage \nn_moy = 20 # averaging to reduce glitches\nstop_run = False # to control the execution (run/stop)\n\nu_avg = pidcmes.get_tension(n_moy) # read the value in volts\n\n \nif u_avg < u_bat_min:# or i > 10: \n print(\"proper shut down of the machine due to low battery\")\n# time.sleep(5)\n# call(\"sudo shutdown -h now\", shell=True) # shutdown the RASPI\nelse:\n print(\"tout va bien dormez braves gens\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
""" This module provides a script to extract data from all JSON files stored in a specific directory and create a HTML table for an better overview of the data. .. moduleauthor:: Maximilian Springenberg <[email protected]> | """ from collections import defaultdict from argparse import ArgumentParser import os import sys import json import pandas as pd FILE_DIR = os.path.dirname(os.path.abspath(__file__)) SRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', '')) sys.path.append(SRC_DIR) sys.path.append(FILE_DIR) from src.util import sanity_util def jsons_to_table(dir_jsons, dir_out, name, format='html'): """ Extracts the informations stored in the JSON files and stores creates an HTML-table for them. :param dir_jsons: directory of JSON files :param dir_out: output directory of the HTML-table :param name: name of the HTML page """ # sanity of paths dir_out = sanity_util.safe_dir_path(dir_path=dir_out) file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix='.{}'.format(format)) # reading JSON files p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.listdir(dir_jsons)]) table = defaultdict(list) keys = set() for p_f in p_files: if p_f.lower().endswith('.json'): with open(p_f, 'r') as f_json: el = json.load(f_json) for k in el.keys(): keys.add(k) for p_f in p_files: if p_f.lower().endswith('.json'): with open(p_f, 'r') as f_json: el = json.load(f_json) for k in el.keys(): table[k].append(el[k]) for k in keys.difference(set(el.keys())): table[k].append(None) # DataFrame conversion df = pd.DataFrame.from_dict(table) # writing HTML table if format == 'html': table_str = df.to_html() else: table_str = df.to_latex() table_str += '<script type="text/javascript" src="stylize.js"></script>' stylize_js = js_stylize() with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js: f_js.write(stylize_js) with open(file_name, 'w') as f_out: f_out.write(table_str) def js_stylize(): return ''' /** * small script to stylize raw html tables * @author Maximilian Springenberg <[email protected]> */ /** * adding all bootstrap relevent dependencies to the headder */ function add_bootsrap(){ document.head.innerHTML += "<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n" + "<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n" + "<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n" + "<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>"; } /** * setting classnames of a specific tag */ function style_tag(tagName, className){ tags = document.getElementsByTagName(tagName); for(let i=0; i<tags.length; ++i){ tags[i].className = className; } } /** * setting the (Bootstrap) contenteditable flag for a specific tag */ function editable_tag(tagName, editable){ tags = document.getElementsByTagName(tagName); for(let i=0; i<tags.length; ++i){ tags[i].setAttribute('contenteditable', editable); } } // setting title document.title = 'PHOCNet Table'; // adding bootstrap add_bootsrap(); // stylize tables style_tag('table', 'table table-responsive-md'); style_tag('thead', 'thead-dark'); // enable editable table-divisions editable_tag('td', 'true'); ''' def parser(): """ Creates a parser of this script. 
:return: args-parser with the following arguments Positional: =============== ====================================================== arg semantic =============== ====================================================== dir_jsons directory of JSON files dir_out the directory to safe the HTML page to file_name name of the HTML file =============== ====================================================== """ parser = ArgumentParser() parser.add_argument('dir_jsons', help='dir containing json files') parser.add_argument('dir_out', help='output directory') parser.add_argument('file_name', help='name of HTML file') return parser if __name__ == '__main__': arg_parser = parser() args = vars(arg_parser.parse_args()) jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'], name=args['name'], format='html')
normal
{ "blob_id": "d6e836140b1f9c955711402111dc07e74b4a23b1", "index": 1621, "step-1": "<mask token>\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json 
files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\n<mask token>\n", "step-2": "<mask token>\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\n<mask token>\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n 
=============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],\n name=args['name'], format='html')\n", "step-3": "<mask token>\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\n<mask token>\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable 
table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],\n name=args['name'], format='html')\n", "step-4": "<mask token>\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\nimport os\nimport sys\nimport json\nimport pandas as pd\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\nfrom src.util import sanity_util\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = 
document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],\n name=args['name'], format='html')\n", "step-5": "\"\"\"\nThis module provides a script to extract data from all JSON files stored in a specific directory and create a HTML\ntable for an better overview of the data.\n\n.. 
moduleauthor:: Maximilian Springenberg <[email protected]>\n\n|\n\n\"\"\"\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\n\nimport os\nimport sys\nimport json\nimport pandas as pd\n\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\nfrom src.util import sanity_util\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n # sanity of paths\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix='.{}'.format(format))\n # reading JSON files\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n # DataFrame conversion\n df = pd.DataFrame.from_dict(table)\n # writing HTML table\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return '''\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\\\"stylesheet\\\" href=\\\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\\\">\\n\" +\n \"<script src=\\\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\\\"></script>\\n\" +\n \"<script src=\\\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\\\"></script>\\n\" +\n \"<script src=\\\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\\\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n '''\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== 
======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'], name=args['name'], format='html')\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from django.utils import timezone
from factory import DjangoModelFactory

from djtriggers.tests.models import DummyTrigger


class DummyTriggerFactory(DjangoModelFactory):
    class Meta:
        model = DummyTrigger

    trigger_type = 'dummy_trigger_test'
    source = 'tests'
    date_received = timezone.now()
    date_processed = None
    process_after = None
    number_of_tries = 0
normal
{ "blob_id": "813354c9c294c0323c1b54cda7074fbffa49cdb3", "index": 442, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass DummyTriggerFactory(DjangoModelFactory):\n\n\n class Meta:\n model = DummyTrigger\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass DummyTriggerFactory(DjangoModelFactory):\n\n\n class Meta:\n model = DummyTrigger\n trigger_type = 'dummy_trigger_test'\n source = 'tests'\n date_received = timezone.now()\n date_processed = None\n process_after = None\n number_of_tries = 0\n", "step-4": "from django.utils import timezone\nfrom factory import DjangoModelFactory\nfrom djtriggers.tests.models import DummyTrigger\n\n\nclass DummyTriggerFactory(DjangoModelFactory):\n\n\n class Meta:\n model = DummyTrigger\n trigger_type = 'dummy_trigger_test'\n source = 'tests'\n date_received = timezone.now()\n date_processed = None\n process_after = None\n number_of_tries = 0\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#----------- writing our for loop
""" number = [1,2,3,4,5]
friends = ['ahmet', 'mehmet','ayşe']

# for n in number:
#     print(n)
# for n in friends:
#     print(n)

def my_for_loop(my_iterable):
    my_iterator = iter(my_iterable)
    while True:
        try:
            print(next(my_iterator))
        except StopIteration:
            break

my_for_loop(number)
my_for_loop(friends) """


#-------------- to show third power of given range numbers with iterator class
""" class CubeNumbers:
    def __init__(self, start, end):
        self.start = start
        self.end = end

    def __iter__(self):
        return self

    def __next__(self):
        if self.start <= self.end:
            result = self.start ** 3
            self.start += 1
            return result
        else:
            raise StopIteration

cubed = CubeNumbers(0, 5)
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed)) """


#-------- to show third power of given range numbers with generator
""" cubed = (x**3 for x in range(0, 5))
print(type(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed))
print(next(cubed)) """


#--------------- fibonacci numbers with generator function
""" def fibo(limit):
    x = 0
    y = 1
    while x < limit:
        yield x
        x, y = y, x + y

my_fib = fibo(1000)
for fib in my_fib:
    print(fib) """


#------------- to show index and value together
""" friends = ['john', 'walter', 'henry']

# i = 0
# while i < len(friends):
#     v = friends[i]
#     print(i, v)
#     i += 1

# for n in range(len(friends)):
#     v = friends[n]
#     print(n, v)

for i, v in enumerate(friends):
    print(i, v) """
normal
{ "blob_id": "70325d0e5eb9dcd7a065f83eaf14647bc30bd7f3", "index": 9053, "step-1": "<mask token>\n", "step-2": "\n#----------- writing our for loop\n\"\"\" number = [1,2,3,4,5]\nfriends = ['ahmet', 'mehmet','ayşe']\n\n# for n in number:\n# print(n)\n# for n in friends:\n# print(n)\n\ndef my_for_loop(my_iterable):\n my_iterator = iter(my_iterable)\n while True:\n try:\n print(next(my_iterator))\n except StopIteration:\n break\n\nmy_for_loop(number)\nmy_for_loop(friends) \"\"\"\n\n\n#--------------to show thirth power of given range numbers with iterator class\n\n\n\"\"\" class CubeNumbers:\n def __init__(self, start, end):\n self.start = start\n self.end = end\n \n def __iter__(self):\n return self\n \n def __next__(self):\n if self.start <= self.end:\n result = self.start ** 3\n self.start += 1\n return result\n else:\n raise StopIteration\n \ncubed = CubeNumbers(0, 5)\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed)) \"\"\"\n\n\n#--------to show thirth power of given range numbers with generator \n\n\"\"\" cubed = (x**3 for x in range(0, 5))\nprint(type(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed))\nprint(next(cubed)) \"\"\"\n\n\n#---------------fibonacci numbers with generator function\n\n\"\"\" def fibo(limit):\n x = 0\n y = 1\n while x < limit:\n yield x\n x, y = y, x + y\n \nmy_fib = fibo(1000)\nfor fib in my_fib:\n print(fib) \"\"\"\n \n\n#-------------to show index and value together\n\n\"\"\" friends = ['john', 'walter', 'henry']\n\n# i = 0\n# while i < len(friends):\n# v = friends[i]\n# print(i, v)\n# i += 1\n\n# for n in range(len(friends)):\n# v = friends[n]\n# print(n, v)\n\nfor i, v in enumerate(friends):\n print(i, v) \"\"\"", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from flask import Flask, request, jsonify
from flask_restful import Api
import json
import eth_account
import algosdk

app = Flask(__name__)
api = Api(app)
app.url_map.strict_slashes = False


@app.route('/verify', methods=['GET', 'POST'])
def verify():
    content = request.get_json(silent=True, force=True)
    # Check if signature is valid
    print(content)
    if content == None:
        return jsonify("No json data is sent.")
    sig = content.get('sig')
    payload = content.get('payload')
    message = payload.get('message')
    pk = payload.get('pk')
    platform = payload.get('platform')
    if platform == "Ethereum":
        encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(payload))
        result = eth_account.Account.recover_message(encoded_msg, signature=sig) == pk
    else:
        result = algosdk.util.verify_bytes(json.dumps(payload).encode('utf-8'), sig, pk)
    return jsonify(result)


if __name__ == '__main__':
    app.run(port='5002')
normal
{ "blob_id": "8bae45de54535e7b0788aa12717645ae9f193664", "index": 8113, "step-1": "<mask token>\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n", "step-3": "<mask token>\napp = Flask(__name__)\napi = Api(app)\napp.url_map.strict_slashes = False\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n", "step-4": "from flask import Flask, request, jsonify\nfrom flask_restful import Api\nimport json\nimport eth_account\nimport algosdk\napp = Flask(__name__)\napi = Api(app)\napp.url_map.strict_slashes = False\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n", "step-5": "from flask import Flask, request, jsonify\nfrom flask_restful import Api\nimport json\nimport eth_account\nimport algosdk\n\napp = Flask(__name__)\napi = Api(app)\napp.url_map.strict_slashes = 
False\n\[email protected]('/verify', methods=['GET','POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n #Check if signature is valid\n print(content)\n if content == None:\n return jsonify(\"No json data is sent.\")\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == \"Ethereum\":\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(payload))\n result = eth_account.Account.recover_message(encoded_msg,signature=sig) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode('utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
n = int(0)
import random

def doubleEven(n):
    if n % 2 == 0:
        n = n * 2
        return (n)
    else:
        return "-1"


print(doubleEven(n = int(input("put in a number"))))

g = int(0)

def grade(g):
    if g < 50:
        return "F"
    if g < 66:
        return "C"
    if g > 92:
        return "A+"
    else:
        print("error")


print(grade(g = int(input("put in your percent"))))

num1 = 0
num2 = 0
num3 = 0

def largestNum(num1, num2, num3):
    num1 = int(input("input number 1"))
    num2 = int(input("input number 2"))
    num3 = int(input("input number 3"))
    if num1 > num2:
        if num1 > num3:
            return num1
        if num3 > num1:
            return num3
    if num2 > num3:
        return num2
    if num3 > num2:
        return num3


print(largestNum(10, 20, 30))


def sumDice(Dice, numRolls):
normal
{ "blob_id": "5251724656e1d971900fff3d8fa0210c6cfc27bb", "index": 5505, "step-1": "n=int(0)\nimport random\ndef doubleEven(n):\n if n % 2 == 0:\n n = n*2\n return (n)\n else:\n return \"-1\"\n\n\nprint(doubleEven(n = int(input(\"put in a number\"))))\n\ng=int(0)\n\ndef grade(g):\n if g < 50:\n return \"F\"\n if g < 66:\n return \"C\"\n if g > 92:\n return \"A+\"\n\n else:\n print(\"error\")\n\n\nprint(grade(g = int(input(\"put in your percent\"))))\n\nnum1 = 0\nnum2 = 0\nnum3 = 0\n\ndef largestNum(num1, num2, num3):\n num1 = int(input(\"input number 1\"))\n num2 = int(input(\"input number 2\"))\n num3 = int(input(\"input number 3\"))\n if num1 > num2:\n if num1 > num3:\n return num1\n if num3 > num1:\n return num3\n if num2 > num3:\n return num2\n if num3 > num2:\n return num3\n\n\nprint(largestNum(10, 20, 30))\n\n\ndef sumDice(Dice, numRolls):", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python3
#
# main.py - By Steven Chen Hao Nyeo
# Graphical interface for Socionics Engine
# Created: August 8, 2019

import wx
from cognitive_function import *
from entity import Entity
from function_to_type import Translator
from function_analysis import *

class TypeFrame(wx.Frame):
    def __init__(self, parent, title):

        # Create Frame
        wx.Frame.__init__(self, parent, title = title, size = (530, 480), style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
        self.panel = wx.Panel(self)

        # The current list of cognitive functions entered into the system
        self.entityList = []

        # Arrays containing the rows of buttons for dominant and auxiliary functions
        self.domButtons = []
        self.auxButtons = []

        # Keep track of the current row of buttons to enable
        self.rowCount = 0

        # Setup for program interface
        self.row_1_y = 30
        self.row_2_y = 90
        self.row_3_y = 150

        wx.StaticText(self.panel, label = "Dominant Function:", pos = (30, self.row_1_y - 20))
        self.createCogButtons(0)

        wx.StaticText(self.panel, label = "Auxiliary Function:", pos = (30, self.row_2_y - 20))
        self.createCogButtons(1)

    # The function that creates the buttons for the eight cognitive functions
    def createCogButtons(self, row):

        # Keeps track of creation of dominant or auxiliary buttons
        cogButtons = self.domButtons if row == 0 else self.auxButtons

        # Create and bind the buttons to the event
        labels = ["N", "S", "T", "F"]
        for i in range(4):
            cogButtons.append(wx.Button(self.panel, label = labels[i] + "i", size = (50, 30), pos = (30 + 120 * i, self.row_1_y if row == 0 else self.row_2_y)))
            cogButtons.append(wx.Button(self.panel, label = labels[i] + "e", size = (50, 30), pos = (90 + 120 * i, self.row_1_y if row == 0 else self.row_2_y)))
        for i in range(8):
            self.Bind(wx.EVT_BUTTON, self.onclick_cogFunction, cogButtons[i])

        # The auxiliary buttons are disabled before the dominant function is entered
        if (row == 1):
            for button in self.auxButtons:
                button.Disable()

    # The event handler for clicking on the buttons
    def onclick_cogFunction(self, event):
        btnLabel = event.GetEventObject().GetLabel()

        # First row - dominant function
        if (self.rowCount == 0):

            # Disable the dominant function buttons
            self.rowCount = 1
            self.entityList.append(self.labelToFunction(btnLabel))
            for button in self.domButtons:
                button.Disable()

            # Re-enable the appropriate auxiliary function buttons
            for button in self.auxButtons:
                if (button.Label[1] == self.entityList[0].opposite().sublabel and
                        button.Label[0] != self.entityList[0].opposite_orientation().label and
                        button.Label[0] != self.entityList[0].label):
                    button.Enable()

        # Second row - auxiliary function
        else:
            self.entityList.append(self.labelToFunction(btnLabel))
            for button in self.auxButtons:
                button.Disable()

        if (len(self.entityList) == 2):
            e = Entity(self.entityList)
            print(Translator.translate_orientation(e) + Translator.translate_observing(e) + Translator.translate_decision_making(e) + Translator.translate_perception(e))

    # The helper function that returns the corresponding function object according to the entered string
    def labelToFunction(self, btnLabel):
        if (btnLabel == "Ni"):
            return Ni
        elif (btnLabel == "Ne"):
            return Ne
        elif (btnLabel == "Si"):
            return Si
        elif (btnLabel == "Se"):
            return Se
        elif (btnLabel == "Ti"):
            return Ti
        elif (btnLabel == "Te"):
            return Te
        elif (btnLabel == "Fi"):
            return Fi
        elif (btnLabel == "Fe"):
            return Fe

if __name__ == "__main__":
    app = wx.App()
    frame = TypeFrame(None, title = "Socionics Engine")
    frame.Show()
    app.MainLoop()
normal
{ "blob_id": "519dbe97ce9de30e616d660ef168e686c52b01b5", "index": 5452, "step-1": "<mask token>\n\n\nclass TypeFrame(wx.Frame):\n <mask token>\n\n def createCogButtons(self, row):\n cogButtons = self.domButtons if row == 0 else self.auxButtons\n labels = ['N', 'S', 'T', 'F']\n for i in range(4):\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'i',\n size=(50, 30), pos=(30 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'e',\n size=(50, 30), pos=(90 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n for i in range(8):\n self.Bind(wx.EVT_BUTTON, self.onclick_cogFunction, cogButtons[i])\n if row == 1:\n for button in self.auxButtons:\n button.Disable()\n\n def onclick_cogFunction(self, event):\n btnLabel = event.GetEventObject().GetLabel()\n if self.rowCount == 0:\n self.rowCount = 1\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.domButtons:\n button.Disable()\n for button in self.auxButtons:\n if button.Label[1] == self.entityList[0].opposite(\n ).sublabel and button.Label[0] != self.entityList[0\n ].opposite_orientation().label and button.Label[0\n ] != self.entityList[0].label:\n button.Enable()\n else:\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.auxButtons:\n button.Disable()\n if len(self.entityList) == 2:\n e = Entity(self.entityList)\n print(Translator.translate_orientation(e) + Translator.\n translate_observing(e) + Translator.\n translate_decision_making(e) + Translator.\n translate_perception(e))\n\n def labelToFunction(self, btnLabel):\n if btnLabel == 'Ni':\n return Ni\n elif btnLabel == 'Ne':\n return Ne\n elif btnLabel == 'Si':\n return Si\n elif btnLabel == 'Se':\n return Se\n elif btnLabel == 'Ti':\n return Ti\n elif btnLabel == 'Te':\n return Te\n elif btnLabel == 'Fi':\n return Fi\n elif btnLabel == 'Fe':\n return Fe\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TypeFrame(wx.Frame):\n\n def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, title=title, size=(530, 480), style\n =wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)\n self.panel = wx.Panel(self)\n self.entityList = []\n self.domButtons = []\n self.auxButtons = []\n self.rowCount = 0\n self.row_1_y = 30\n self.row_2_y = 90\n self.row_3_y = 150\n wx.StaticText(self.panel, label='Dominant Function:', pos=(30, self\n .row_1_y - 20))\n self.createCogButtons(0)\n wx.StaticText(self.panel, label='Auxiliary Function:', pos=(30, \n self.row_2_y - 20))\n self.createCogButtons(1)\n\n def createCogButtons(self, row):\n cogButtons = self.domButtons if row == 0 else self.auxButtons\n labels = ['N', 'S', 'T', 'F']\n for i in range(4):\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'i',\n size=(50, 30), pos=(30 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'e',\n size=(50, 30), pos=(90 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n for i in range(8):\n self.Bind(wx.EVT_BUTTON, self.onclick_cogFunction, cogButtons[i])\n if row == 1:\n for button in self.auxButtons:\n button.Disable()\n\n def onclick_cogFunction(self, event):\n btnLabel = event.GetEventObject().GetLabel()\n if self.rowCount == 0:\n self.rowCount = 1\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.domButtons:\n button.Disable()\n for button in self.auxButtons:\n if button.Label[1] == self.entityList[0].opposite(\n ).sublabel and button.Label[0] != 
self.entityList[0\n ].opposite_orientation().label and button.Label[0\n ] != self.entityList[0].label:\n button.Enable()\n else:\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.auxButtons:\n button.Disable()\n if len(self.entityList) == 2:\n e = Entity(self.entityList)\n print(Translator.translate_orientation(e) + Translator.\n translate_observing(e) + Translator.\n translate_decision_making(e) + Translator.\n translate_perception(e))\n\n def labelToFunction(self, btnLabel):\n if btnLabel == 'Ni':\n return Ni\n elif btnLabel == 'Ne':\n return Ne\n elif btnLabel == 'Si':\n return Si\n elif btnLabel == 'Se':\n return Se\n elif btnLabel == 'Ti':\n return Ti\n elif btnLabel == 'Te':\n return Te\n elif btnLabel == 'Fi':\n return Fi\n elif btnLabel == 'Fe':\n return Fe\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TypeFrame(wx.Frame):\n\n def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, title=title, size=(530, 480), style\n =wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)\n self.panel = wx.Panel(self)\n self.entityList = []\n self.domButtons = []\n self.auxButtons = []\n self.rowCount = 0\n self.row_1_y = 30\n self.row_2_y = 90\n self.row_3_y = 150\n wx.StaticText(self.panel, label='Dominant Function:', pos=(30, self\n .row_1_y - 20))\n self.createCogButtons(0)\n wx.StaticText(self.panel, label='Auxiliary Function:', pos=(30, \n self.row_2_y - 20))\n self.createCogButtons(1)\n\n def createCogButtons(self, row):\n cogButtons = self.domButtons if row == 0 else self.auxButtons\n labels = ['N', 'S', 'T', 'F']\n for i in range(4):\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'i',\n size=(50, 30), pos=(30 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'e',\n size=(50, 30), pos=(90 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n for i in range(8):\n self.Bind(wx.EVT_BUTTON, self.onclick_cogFunction, cogButtons[i])\n if row == 1:\n for button in self.auxButtons:\n button.Disable()\n\n def onclick_cogFunction(self, event):\n btnLabel = event.GetEventObject().GetLabel()\n if self.rowCount == 0:\n self.rowCount = 1\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.domButtons:\n button.Disable()\n for button in self.auxButtons:\n if button.Label[1] == self.entityList[0].opposite(\n ).sublabel and button.Label[0] != self.entityList[0\n ].opposite_orientation().label and button.Label[0\n ] != self.entityList[0].label:\n button.Enable()\n else:\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.auxButtons:\n button.Disable()\n if len(self.entityList) == 2:\n e = Entity(self.entityList)\n print(Translator.translate_orientation(e) + Translator.\n translate_observing(e) + Translator.\n translate_decision_making(e) + Translator.\n translate_perception(e))\n\n def labelToFunction(self, btnLabel):\n if btnLabel == 'Ni':\n return Ni\n elif btnLabel == 'Ne':\n return Ne\n elif btnLabel == 'Si':\n return Si\n elif btnLabel == 'Se':\n return Se\n elif btnLabel == 'Ti':\n return Ti\n elif btnLabel == 'Te':\n return Te\n elif btnLabel == 'Fi':\n return Fi\n elif btnLabel == 'Fe':\n return Fe\n\n\nif __name__ == '__main__':\n app = wx.App()\n frame = TypeFrame(None, title='Socionics Engine')\n frame.Show()\n app.MainLoop()\n", "step-4": "import wx\nfrom cognitive_function import *\nfrom entity import Entity\nfrom function_to_type import Translator\nfrom function_analysis import *\n\n\nclass TypeFrame(wx.Frame):\n\n 
def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, title=title, size=(530, 480), style\n =wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)\n self.panel = wx.Panel(self)\n self.entityList = []\n self.domButtons = []\n self.auxButtons = []\n self.rowCount = 0\n self.row_1_y = 30\n self.row_2_y = 90\n self.row_3_y = 150\n wx.StaticText(self.panel, label='Dominant Function:', pos=(30, self\n .row_1_y - 20))\n self.createCogButtons(0)\n wx.StaticText(self.panel, label='Auxiliary Function:', pos=(30, \n self.row_2_y - 20))\n self.createCogButtons(1)\n\n def createCogButtons(self, row):\n cogButtons = self.domButtons if row == 0 else self.auxButtons\n labels = ['N', 'S', 'T', 'F']\n for i in range(4):\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'i',\n size=(50, 30), pos=(30 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n cogButtons.append(wx.Button(self.panel, label=labels[i] + 'e',\n size=(50, 30), pos=(90 + 120 * i, self.row_1_y if row == 0 else\n self.row_2_y)))\n for i in range(8):\n self.Bind(wx.EVT_BUTTON, self.onclick_cogFunction, cogButtons[i])\n if row == 1:\n for button in self.auxButtons:\n button.Disable()\n\n def onclick_cogFunction(self, event):\n btnLabel = event.GetEventObject().GetLabel()\n if self.rowCount == 0:\n self.rowCount = 1\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.domButtons:\n button.Disable()\n for button in self.auxButtons:\n if button.Label[1] == self.entityList[0].opposite(\n ).sublabel and button.Label[0] != self.entityList[0\n ].opposite_orientation().label and button.Label[0\n ] != self.entityList[0].label:\n button.Enable()\n else:\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.auxButtons:\n button.Disable()\n if len(self.entityList) == 2:\n e = Entity(self.entityList)\n print(Translator.translate_orientation(e) + Translator.\n translate_observing(e) + Translator.\n translate_decision_making(e) + Translator.\n translate_perception(e))\n\n def labelToFunction(self, btnLabel):\n if btnLabel == 'Ni':\n return Ni\n elif btnLabel == 'Ne':\n return Ne\n elif btnLabel == 'Si':\n return Si\n elif btnLabel == 'Se':\n return Se\n elif btnLabel == 'Ti':\n return Ti\n elif btnLabel == 'Te':\n return Te\n elif btnLabel == 'Fi':\n return Fi\n elif btnLabel == 'Fe':\n return Fe\n\n\nif __name__ == '__main__':\n app = wx.App()\n frame = TypeFrame(None, title='Socionics Engine')\n frame.Show()\n app.MainLoop()\n", "step-5": "#!/usr/bin/env python3\n#\n# main.py - By Steven Chen Hao Nyeo \n# Graphical interface for Socionics Engine \n# Created: August 8, 2019\n\nimport wx\nfrom cognitive_function import *\nfrom entity import Entity\nfrom function_to_type import Translator\nfrom function_analysis import *\n\nclass TypeFrame(wx.Frame):\n def __init__(self, parent, title):\n \n # Create Frame\n wx.Frame.__init__(self, parent, title = title, size = (530, 480), style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)\n self.panel = wx.Panel(self)\n \n # The current list of cognitive functions entered into the system \n self.entityList = []\n\n # Arrays containing the rows of buttons for dominant and auxiliary functions\n self.domButtons = []\n self.auxButtons = []\n\n # Keep track of the current row of buttons to enable\n self.rowCount = 0\n\n # Setup for program interface\n self.row_1_y = 30\n self.row_2_y = 90\n self.row_3_y = 150\n wx.StaticText(self.panel, label = \"Dominant Function:\", pos = (30, self.row_1_y - 20))\n self.createCogButtons(0)\n wx.StaticText(self.panel, label = 
\"Auxiliary Function:\", pos = (30, self.row_2_y - 20))\n self.createCogButtons(1)\n\n # The function that creates the buttons for the eight cognitive functions\n def createCogButtons(self, row):\n\n # Keeps track of creation of dominant or auxiliary buttons\n cogButtons = self.domButtons if row == 0 else self.auxButtons \n \n # Create and bind the buttons to the event\n labels = [\"N\", \"S\", \"T\", \"F\"]\n for i in range(4): \n cogButtons.append(wx.Button(self.panel, label = labels[i] + \"i\", size = (50, 30) , pos = (30 + 120 * i, self.row_1_y if row == 0 else self.row_2_y)))\n cogButtons.append(wx.Button(self.panel, label = labels[i] + \"e\", size = (50, 30) , pos = (90 + 120 * i, self.row_1_y if row == 0 else self.row_2_y)))\n for i in range(8):\n self.Bind(wx.EVT_BUTTON, self.onclick_cogFunction, cogButtons[i])\n\n # The auxiliary buttons are disabled before the dominant function is entered\n if (row == 1): \n for button in self.auxButtons:\n button.Disable()\n\n # The event handler for clicking on the buttons\n def onclick_cogFunction(self, event):\n btnLabel = event.GetEventObject().GetLabel()\n\n # First row - dominant function\n if (self.rowCount == 0):\n\n # Disable the dominant function buttons\n self.rowCount = 1\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.domButtons:\n button.Disable()\n\n # Re-enable the appropriate auxiliary function buttons\n for button in self.auxButtons:\n if (button.Label[1] == self.entityList[0].opposite().sublabel \n and button.Label[0] != self.entityList[0].opposite_orientation().label\n and button.Label[0] != self.entityList[0].label):\n button.Enable()\n\n # Second row - auxiliary function\n else:\n self.entityList.append(self.labelToFunction(btnLabel))\n for button in self.auxButtons:\n button.Disable()\n\n if (len(self.entityList) == 2):\n e = Entity(self.entityList)\n\n print(Translator.translate_orientation(e) +\n Translator.translate_observing(e) +\n Translator.translate_decision_making(e) +\n Translator.translate_perception(e))\n\n # The helper functin that returns the corresponding function object according to the entered string\n def labelToFunction(self, btnLabel):\n if (btnLabel == \"Ni\"): \n return Ni\n elif (btnLabel == \"Ne\"): \n return Ne\n elif (btnLabel == \"Si\"): \n return Si\n elif (btnLabel == \"Se\"): \n return Se\n elif (btnLabel == \"Ti\"): \n return Ti\n elif (btnLabel == \"Te\"): \n return Te\n elif (btnLabel == \"Fi\"): \n return Fi\n elif (btnLabel == \"Fe\"): \n return Fe\n\nif __name__ == \"__main__\":\n app = wx.App()\n frame = TypeFrame(None, title = \"Socionics Engine\")\n frame.Show()\n app.MainLoop()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
# -*- coding: utf-8 -*-
import csv

# Read the CSV file as raw lines; csv.reader(c) could be used instead
# (as in the commented-out variant below) to get parsed rows.
c = open(r"e:/test.csv", "r+")
# read = csv.reader(c)
# for line in read:
#     print(line)
read = c.readlines()
print(read)
c.close()
normal
{ "blob_id": "c65e14de297cc785b804e68f29bd5766ca7a8cf7", "index": 7958, "step-1": "# _*_ coding:utf-8 _*_\nimport csv\n\nc=open(r\"e:/test.csv\",\"r+\")\n#read=csv.reader(c)\n#for line in read:\n# print line\nread=c.readlines()\nprint read\nc.close()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
class Solution(object): def findPaths(self, m, n, N, i, j): """ :type m: int :type n: int :type N: int :type i: int :type j: int :rtype: int """ MOD = 10 ** 9 + 7 dz = zip((1,0,-1,0),(0,1,0,-1)) dp = [[0]* n for x in range(m)] dp[i][j] = 1 ans = 0 for _ in range(N): ndp = [[0] * n for x in range(m)] for x in range(m): for y in range(n): for dx,dy in dz: nx,ny = x + dx, y+dy if 0 <= nx < m and 0 <= ny <n: ndp[nx][ny]= (ndp[nx][ny]+dp[x][y])%MOD else: ans = (ans + dp[x][y])% MOD dp = ndp return ans
normal
{ "blob_id": "ebbc79d6582f7d6139e0dcec6333b679bb86c63c", "index": 1383, "step-1": "<mask token>\n", "step-2": "class Solution(object):\n <mask token>\n", "step-3": "class Solution(object):\n\n def findPaths(self, m, n, N, i, j):\n \"\"\"\n :type m: int\n :type n: int\n :type N: int\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n MOD = 10 ** 9 + 7\n dz = zip((1, 0, -1, 0), (0, 1, 0, -1))\n dp = [([0] * n) for x in range(m)]\n dp[i][j] = 1\n ans = 0\n for _ in range(N):\n ndp = [([0] * n) for x in range(m)]\n for x in range(m):\n for y in range(n):\n for dx, dy in dz:\n nx, ny = x + dx, y + dy\n if 0 <= nx < m and 0 <= ny < n:\n ndp[nx][ny] = (ndp[nx][ny] + dp[x][y]) % MOD\n else:\n ans = (ans + dp[x][y]) % MOD\n dp = ndp\n return ans\n", "step-4": "class Solution(object):\n def findPaths(self, m, n, N, i, j):\n \"\"\"\n :type m: int\n :type n: int\n :type N: int\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n MOD = 10 ** 9 + 7\n dz = zip((1,0,-1,0),(0,1,0,-1))\n dp = [[0]* n for x in range(m)]\n dp[i][j] = 1\n ans = 0\n for _ in range(N):\n ndp = [[0] * n for x in range(m)]\n for x in range(m):\n for y in range(n):\n for dx,dy in dz:\n nx,ny = x + dx, y+dy\n if 0 <= nx < m and 0 <= ny <n:\n ndp[nx][ny]= (ndp[nx][ny]+dp[x][y])%MOD\n else:\n ans = (ans + dp[x][y])% MOD\n \n dp = ndp\n \n return ans\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Generated by Django 3.2.5 on 2021-08-28 12:34 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('userProfile', '0022_auto_20210823_1858'), ] operations = [ migrations.RemoveField( model_name='subscription', name='price_type', ), migrations.AddField( model_name='subscription', name='price', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='userProfile.price'), ), ]
normal
{ "blob_id": "96bb865b66e5d9ba62bab210705338f1799cc490", "index": 7022, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('userProfile', '0022_auto_20210823_1858')]\n operations = [migrations.RemoveField(model_name='subscription', name=\n 'price_type'), migrations.AddField(model_name='subscription', name=\n 'price', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='userProfile.price'))]\n", "step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('userProfile', '0022_auto_20210823_1858')]\n operations = [migrations.RemoveField(model_name='subscription', name=\n 'price_type'), migrations.AddField(model_name='subscription', name=\n 'price', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='userProfile.price'))]\n", "step-5": "# Generated by Django 3.2.5 on 2021-08-28 12:34\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('userProfile', '0022_auto_20210823_1858'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='subscription',\n name='price_type',\n ),\n migrations.AddField(\n model_name='subscription',\n name='price',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='userProfile.price'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from __future__ import annotations from typing import Generator, Optional from collections import Counter from itertools import zip_longest from re import finditer codon_table = """UUU F CUU L AUU I GUU V UUC F CUC L AUC I GUC V UUA L CUA L AUA I GUA V UUG L CUG L AUG M GUG V UCU S CCU P ACU T GCU A UCC S CCC P ACC T GCC A UCA S CCA P ACA T GCA A UCG S CCG P ACG T GCG A UAU Y CAU H AAU N GAU D UAC Y CAC H AAC N GAC D UAA Stop CAA Q AAA K GAA E UAG Stop CAG Q AAG K GAG E UGU C CGU R AGU S GGU G UGC C CGC R AGC S GGC G UGA Stop CGA R AGA R GGA G UGG W CGG R AGG R GGG G""" codons = dict(zip(codon_table.split()[::2], codon_table.split()[1::2])) def consensus(*args): """Return a consensus sequence from n Seq objects.""" counts = map(Counter, zip_longest(*args)) consensus = "" for c in counts: del c[None] consensus += c.most_common(1)[0][0] return Seq(consensus, args[0].id) class Base(str): """Class for nucleotide bases""" pass class Seq: """Class for nucleotide sequences""" def __init__(self, sequence: str, id: str = None, codons: dict = codons): self.sequence = sequence self.id = id self.codons = codons def __repr__(self): if not self.id: return f"Seq({self.sequence[:60]})" concat = "" if len(self) > 60: concat = "..." return f"Seq({self.sequence[:60]}{concat}, id='{self.id}')" def __str__(self): return self.sequence def __len__(self) -> int: return len(self.sequence) def __invert__(self) -> Seq: """Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence""" return self.reverse_complement() def __eq__(self, other) -> bool: """Compare the string representations of two Seq objects""" return str(self) == str(other) def __add__(self, other: Seq) -> Seq: """Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the concatenation of the two objects sequences. ID is taken from eh first object""" new_sequence = self.sequence + other.sequence return Seq(new_sequence, self.id) def __sub__(self, other: Seq) -> int: """Subtracting two Seq objects (i.e. 
seq1 - seq2) returns the hamming difference between them""" return sum(i != j for i, j in zip_longest(self.sequence, other.sequence)) def __getitem__(self, index): if type(index) == int: return Base(self.sequence[index]) if type(index) == str: return self.find(index, overlapping=True) return Seq(self.sequence[index], self.id) def __setitem__(self, index, nt): self.sequence = self.sequence[:index] + nt + self.sequence[index + 1 :] def __iter__(self): self.n = 0 return self def __next__(self): if self.n < len(self): result = self[self.n] self.n += 1 return result else: raise StopIteration def __contains__(self, other): if str(other) in str(self): return True else: return False @property def gc(self) -> float: """Return the GC content of the sequence""" g = self.count("G") c = self.count("C") return (g + c) / len(self) * 100 @property def counts(self) -> dict: """Return the counts of letters in the sequence""" return Counter(self.sequence) def to_fasta(self, line_length: int = 60) -> str: formated_sequence = "\n".join( [str(s) for s in self.kmers(line_length, line_length)] ) return f">{self.id}\n{formated_sequence}\n" def kmers(self, n: int, step: int = 1) -> Generator: """Return a generator for kmers of length n""" return ( Seq(self.sequence[i : i + n]) for i in range(0, len(self.sequence), step) ) def count(self, string: str, max_diff: int = 0) -> int: if max_diff == 0: return self.sequence.count(string) other = Seq(string) return sum((kmer - other) <= max_diff for kmer in self.kmers(len(other))) def substitute(self, old: str, new: str, count: int = -1): return Seq(self.sequence.replace(str(old), str(new), count), self.id) def find(self, target: str, count: int = -1, overlapping: bool = False): locs = [] if overlapping and len(target) > 1: target = f"(?=({target}))" matches = finditer(target, self.sequence) for i, match in enumerate(matches, 1): locs.append(match.start()) if i == count: break return locs def find_one(self, target: str) -> Optional[str]: loc = self.sequence.find(str(target)) if loc == -1: return None return loc def reverse_complement(self, rna: bool = False) -> Seq: complements = {"A": "T", "T": "A", "G": "C", "C": "G"} if rna: complements = {"A": "U", "U": "A", "G": "C", "C": "G"} revc = "".join(complements[nt] for nt in reversed(self)) return Seq(revc, self.id) def transcribe(self) -> Seq: return Seq(self.sequence.replace("T", "U"), self.id) def reverse_transcribe(self) -> Seq: return Seq(self.sequence.replace("U", "T"), self.id) def translate(self) -> Seq: """ Return the translated sequence. *Currently stop signals are ignored.* """ AA = "".join( self.codons[self.sequence[i : i + 3]] for i in range(0, len(self.sequence), 3) if self.codons[self.sequence[i : i + 3]] != "Stop" ) return Seq(AA, self.id) def startswith(self, seq: str) -> bool: return self.sequence.startswith(str(seq)) def endswith(self, seq: str) -> bool: return self.sequence.endswith(str(seq))
normal
{ "blob_id": "3d742505d480493fbc729e7a0febdcab3a7dc041", "index": 9386, "step-1": "<mask token>\n\n\nclass Seq:\n <mask token>\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n <mask token>\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n <mask token>\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n <mask token>\n <mask token>\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n <mask token>\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n <mask token>\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Seq:\n <mask token>\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return 
f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n\n def __invert__(self) ->Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) ->int:\n \"\"\"Subtracting two Seq objects (i.e. seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.\n sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n <mask token>\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n\n def count(self, string: str, max_diff: int=0) ->int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum(kmer - other <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n <mask token>\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if 
self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Base(str):\n \"\"\"Class for nucleotide bases\"\"\"\n pass\n\n\nclass Seq:\n \"\"\"Class for nucleotide sequences\"\"\"\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n\n def __invert__(self) ->Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) ->int:\n \"\"\"Subtracting two Seq objects (i.e. seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.\n sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n\n def count(self, string: str, max_diff: int=0) ->int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum(kmer - other <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = 
self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n\n def reverse_complement(self, rna: bool=False) ->Seq:\n complements = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n if rna:\n complements = {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G'}\n revc = ''.join(complements[nt] for nt in reversed(self))\n return Seq(revc, self.id)\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n\n def startswith(self, seq: str) ->bool:\n return self.sequence.startswith(str(seq))\n\n def endswith(self, seq: str) ->bool:\n return self.sequence.endswith(str(seq))\n", "step-4": "<mask token>\ncodon_table = \"\"\"UUU F CUU L AUU I GUU V\nUUC F CUC L AUC I GUC V\nUUA L CUA L AUA I GUA V\nUUG L CUG L AUG M GUG V\nUCU S CCU P ACU T GCU A\nUCC S CCC P ACC T GCC A\nUCA S CCA P ACA T GCA A\nUCG S CCG P ACG T GCG A\nUAU Y CAU H AAU N GAU D\nUAC Y CAC H AAC N GAC D\nUAA Stop CAA Q AAA K GAA E\nUAG Stop CAG Q AAG K GAG E\nUGU C CGU R AGU S GGU G\nUGC C CGC R AGC S GGC G\nUGA Stop CGA R AGA R GGA G\nUGG W CGG R AGG R GGG G\"\"\"\ncodons = dict(zip(codon_table.split()[::2], codon_table.split()[1::2]))\n\n\ndef consensus(*args):\n \"\"\"Return a consensus sequence from n Seq objects.\"\"\"\n counts = map(Counter, zip_longest(*args))\n consensus = ''\n for c in counts:\n del c[None]\n consensus += c.most_common(1)[0][0]\n return Seq(consensus, args[0].id)\n\n\nclass Base(str):\n \"\"\"Class for nucleotide bases\"\"\"\n pass\n\n\nclass Seq:\n \"\"\"Class for nucleotide sequences\"\"\"\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n\n def __invert__(self) ->Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) ->int:\n \"\"\"Subtracting two Seq objects (i.e. 
seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.\n sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n\n def count(self, string: str, max_diff: int=0) ->int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum(kmer - other <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n\n def reverse_complement(self, rna: bool=False) ->Seq:\n complements = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n if rna:\n complements = {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G'}\n revc = ''.join(complements[nt] for nt in reversed(self))\n return Seq(revc, self.id)\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n\n def startswith(self, seq: str) ->bool:\n return self.sequence.startswith(str(seq))\n\n def endswith(self, seq: str) ->bool:\n return self.sequence.endswith(str(seq))\n", "step-5": "from __future__ import annotations\n\nfrom typing import Generator, Optional\nfrom collections import Counter\nfrom itertools import zip_longest\nfrom re import finditer\n\ncodon_table = \"\"\"UUU F CUU L AUU I GUU V\nUUC F CUC L AUC I GUC V\nUUA L CUA L AUA I GUA V\nUUG L CUG L AUG M GUG V\nUCU S CCU P ACU T GCU A\nUCC S CCC P ACC T GCC A\nUCA S CCA P ACA T GCA A\nUCG S CCG P ACG T GCG A\nUAU Y CAU H 
AAU N GAU D\nUAC Y CAC H AAC N GAC D\nUAA Stop CAA Q AAA K GAA E\nUAG Stop CAG Q AAG K GAG E\nUGU C CGU R AGU S GGU G\nUGC C CGC R AGC S GGC G\nUGA Stop CGA R AGA R GGA G\nUGG W CGG R AGG R GGG G\"\"\"\n\ncodons = dict(zip(codon_table.split()[::2], codon_table.split()[1::2]))\n\n\ndef consensus(*args):\n \"\"\"Return a consensus sequence from n Seq objects.\"\"\"\n counts = map(Counter, zip_longest(*args))\n consensus = \"\"\n for c in counts:\n del c[None]\n consensus += c.most_common(1)[0][0]\n return Seq(consensus, args[0].id)\n\n\nclass Base(str):\n \"\"\"Class for nucleotide bases\"\"\"\n\n pass\n\n\nclass Seq:\n \"\"\"Class for nucleotide sequences\"\"\"\n\n def __init__(self, sequence: str, id: str = None, codons: dict = codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f\"Seq({self.sequence[:60]})\"\n concat = \"\"\n if len(self) > 60:\n concat = \"...\"\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) -> int:\n return len(self.sequence)\n\n def __invert__(self) -> Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) -> bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) -> Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) -> int:\n \"\"\"Subtracting two Seq objects (i.e. 
seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1 :]\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) -> float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count(\"G\")\n c = self.count(\"C\")\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) -> dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int = 60) -> str:\n formated_sequence = \"\\n\".join(\n [str(s) for s in self.kmers(line_length, line_length)]\n )\n return f\">{self.id}\\n{formated_sequence}\\n\"\n\n def kmers(self, n: int, step: int = 1) -> Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (\n Seq(self.sequence[i : i + n]) for i in range(0, len(self.sequence), step)\n )\n\n def count(self, string: str, max_diff: int = 0) -> int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum((kmer - other) <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, old: str, new: str, count: int = -1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int = -1, overlapping: bool = False):\n locs = []\n if overlapping and len(target) > 1:\n target = f\"(?=({target}))\"\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) -> Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n\n def reverse_complement(self, rna: bool = False) -> Seq:\n complements = {\"A\": \"T\", \"T\": \"A\", \"G\": \"C\", \"C\": \"G\"}\n if rna:\n complements = {\"A\": \"U\", \"U\": \"A\", \"G\": \"C\", \"C\": \"G\"}\n revc = \"\".join(complements[nt] for nt in reversed(self))\n return Seq(revc, self.id)\n\n def transcribe(self) -> Seq:\n return Seq(self.sequence.replace(\"T\", \"U\"), self.id)\n\n def reverse_transcribe(self) -> Seq:\n return Seq(self.sequence.replace(\"U\", \"T\"), self.id)\n\n def translate(self) -> Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = \"\".join(\n self.codons[self.sequence[i : i + 3]]\n for i in range(0, len(self.sequence), 3)\n if self.codons[self.sequence[i : i + 3]] != \"Stop\"\n )\n return Seq(AA, self.id)\n\n def startswith(self, seq: str) -> bool:\n return self.sequence.startswith(str(seq))\n\n def endswith(self, seq: str) -> bool:\n return self.sequence.endswith(str(seq))\n", "step-ids": [ 20, 24, 31, 33, 35 ] }
[ 20, 24, 31, 33, 35 ]
# Checks are done only to the right and downward.
def check22(y, x, board) :

    dirs = [[0,1], [1,0], [1,1]]

    ret = [(y,x)]
    for d in dirs :
        dy, dx = y+d[0], x+d[1]
        if not ( (0<=dy<len(board)) and (0<=dx<len(board[0])) and board[dy][dx]!='0' and board[y][x]==board[dy][dx] ) :
            return False
        else :
            ret.append((dy,dx))

    return ret # these cells will all be deleted at once later

def dropdown(board) :

    for x in range(len(board[0])) :
        cnt = 0
        movable = False
        for y in range(len(board)-1, -1, -1) :
            # if y == len(board)-1 :
            #     if board[y][x] == '0' : break
            if board[y][x] == '0' :
                cnt += 1
                movable = True
            if board[y][x] != '0' and movable :
                # this block is floating above an empty cell; drop it down by cnt
                board[y+cnt][x] = board[y][x]
                board[y][x] = '0'

    return board

def deleteBoard(delete, board) :

    for delNode in delete :
        board[delNode[0]][delNode[1]] = '0'

    return board

def solution(m, n, board):
    answer = 0

    for i in range(len(board)) :
        board[i] = list(board[i])


    while True :

        delete = set([])

        for y in range(len(board)) :
            for x in range(len(board[0])) :
                tmp = check22(y, x, board)
                if tmp :
                    delete |= set(tmp)

        delete = list(delete)
        if not delete : break

        answer += len(delete)

        board = deleteBoard(delete, board)
        # print(board)
        board = dropdown(board)
        # print(board)

    return answer
normal
{ "blob_id": "938c4325480608b904bfbe0b11c081166aad694b", "index": 7291, "step-1": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n", "step-2": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\n<mask token>\n", "step-3": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\ndef solution(m, n, board):\n answer = 0\n for i in range(len(board)):\n board[i] = list(board[i])\n while True:\n delete = set([])\n for y in range(len(board)):\n for x in range(len(board[0])):\n tmp = check22(y, x, board)\n if tmp:\n delete |= set(tmp)\n delete = list(delete)\n if not delete:\n break\n answer += len(delete)\n board = deleteBoard(delete, board)\n board = dropdown(board)\n return answer\n", "step-4": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\ndef dropdown(board):\n for x in range(len(board[0])):\n cnt = 0\n movable = False\n for y in range(len(board) - 1, -1, -1):\n if board[y][x] == '0':\n cnt += 1\n movable = True\n if board[y][x] != '0' and movable:\n board[y + cnt][x] = board[y][x]\n board[y][x] = '0'\n return board\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\ndef solution(m, n, board):\n answer = 0\n for i in range(len(board)):\n board[i] = list(board[i])\n while True:\n delete = set([])\n for y in range(len(board)):\n for x in range(len(board[0])):\n tmp = check22(y, x, board)\n if tmp:\n delete |= set(tmp)\n delete = list(delete)\n if not delete:\n break\n answer += len(delete)\n board = deleteBoard(delete, board)\n board = dropdown(board)\n return answer\n", "step-5": "# 체크는 오른쪽+아래로만 체크합니다.\ndef check22(y, x, board) : \n \n dirs = [[0,1], [1,0], [1,1]]\n \n ret = [(y,x)]\n for d in dirs :\n dy, dx = y+d[0], x+d[1]\n if not ( (0<=dy<len(board)) and (0<=dx<len(board[0])) and board[dy][dx]!='0' and board[y][x]==board[dy][dx] ) :\n return False\n else :\n ret.append((dy,dx))\n\n return ret # 나중에 한 번에 삭제될 거임\n\ndef dropdown(board) :\n \n for x in range(len(board[0])) :\n cnt = 0\n movable = False\n for y in range(len(board)-1, -1, -1) :\n # if y == len(board)-1 :\n # if board[y][x] == '0' : break\n if board[y][x] == '0' :\n cnt += 1\n movable = True\n if board[y][x] != '0' and movable :\n # 위에 떠있는 블록임. 
cnt만큼 내리면 됨\n board[y+cnt][x] = board[y][x]\n board[y][x] = '0'\n \n return board\n \ndef deleteBoard(delete, board) :\n \n for delNode in delete :\n board[delNode[0]][delNode[1]] = '0'\n \n return board\n\ndef solution(m, n, board):\n answer = 0\n \n for i in range(len(board)) :\n board[i] = list(board[i])\n \n \n while True :\n \n delete = set([])\n \n for y in range(len(board)) :\n for x in range(len(board[0])) :\n tmp = check22(y, x, board)\n if tmp :\n delete |= set(tmp)\n \n delete = list(delete)\n if not delete : break\n \n answer += len(delete)\n \n board = deleteBoard(delete, board)\n # print(board)\n board = dropdown(board)\n # print(board)\n \n return answer\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from Monument import Monument, Dataset import importer_utils as utils import importer as importer class RoRo(Monument): def set_adm_location(self): counties = self.data_files["counties"] self.set_from_dict_match(counties, "iso_code", "judetul_iso", "located_adm") def set_location(self): """ Set Location property from article linked in localitate. Run this after set_adm_location. localitate can contain several links (we take the 1st which seems to be the most granular one) and a mix of administrative types. Compare with admin location so that they're not the same. """ if self.has_non_empty_attribute("localitate"): loc_item = None if utils.count_wikilinks(self.localitate) > 0: loc_link = utils.get_wikilinks(self.localitate)[0] loc_item = utils.q_from_wikipedia("ro", loc_link.title) adm_item = self.get_statement_values("located_adm") if loc_item and loc_item != adm_item[0]: self.add_statement("location", loc_item) if not loc_item: self.add_to_report("localitate", self.localitate, "location") def set_heritage_id(self): self.add_statement("romanian_monument_id", self.cod) def update_descriptions(self): adm_code = self.judetul_iso counties = self.data_files["counties"] county_item = utils.get_item_from_dict_by_key(dict_name=counties, search_term=adm_code, return_content_of="itemLabel", search_in="iso_code") if len(county_item) == 1: place_name = "{}, Romania".format(county_item[0]) else: place_name = "Romania" desc = "heritage site in {}".format(place_name) self.add_description("en", desc) self.add_disambiguator(str(self.cod)) def set_address(self): street_patterns = ("piața", "str.", "bd.") if self.has_non_empty_attribute("adresa"): adr_lower = self.adresa.lower() adr_nice = utils.remove_markup(self.adresa) if any(pattern in adr_lower for pattern in street_patterns): if self.has_non_empty_attribute("localitate"): town = utils.remove_markup(self.localitate) adr_nice = "{}, {}".format(adr_nice, town) self.add_statement("located_street", adr_nice) else: directions = utils.package_monolingual(adr_nice, 'ro') self.add_statement("directions", directions) def update_labels(self): romanian = utils.remove_markup(self.denumire) self.add_label("ro", romanian) def __init__(self, db_row_dict, mapping, data_files, existing, repository): Monument.__init__(self, db_row_dict, mapping, data_files, existing, repository) self.set_monuments_all_id("cod") self.set_changed() self.set_wlm_source() self.set_heritage_id() self.set_heritage() self.set_country() self.set_adm_location() self.set_address() self.set_location() self.set_coords() self.set_commonscat() self.set_image("imagine") self.update_labels() self.update_descriptions() self.set_wd_item(self.find_matching_wikidata(mapping)) if __name__ == "__main__": """Command line entry point for importer.""" args = importer.handle_args() dataset = Dataset("ro", "ro", RoRo) dataset.data_files = {"counties": "romania_counties.json"} importer.main(args, dataset)
normal
{ "blob_id": "5f8a9d82a3245671b438475d1fac7be4db769fbe", "index": 8493, "step-1": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n <mask token>\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n <mask token>\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n <mask token>\n <mask token>\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. 
Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute('localitate'):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia('ro', loc_link.title)\n adm_item = self.get_statement_values('located_adm')\n if loc_item and loc_item != adm_item[0]:\n self.add_statement('location', loc_item)\n if not loc_item:\n self.add_to_report('localitate', self.localitate, 'location')\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = 'piața', 'str.', 'bd.'\n if self.has_non_empty_attribute('adresa'):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute('localitate'):\n town = utils.remove_markup(self.localitate)\n adr_nice = '{}, {}'.format(adr_nice, town)\n self.add_statement('located_street', adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement('directions', directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label('ro', romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. 
Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute('localitate'):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia('ro', loc_link.title)\n adm_item = self.get_statement_values('located_adm')\n if loc_item and loc_item != adm_item[0]:\n self.add_statement('location', loc_item)\n if not loc_item:\n self.add_to_report('localitate', self.localitate, 'location')\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = 'piața', 'str.', 'bd.'\n if self.has_non_empty_attribute('adresa'):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute('localitate'):\n town = utils.remove_markup(self.localitate)\n adr_nice = '{}, {}'.format(adr_nice, town)\n self.add_statement('located_street', adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement('directions', directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label('ro', romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\nif __name__ == '__main__':\n \"\"\"Command line entry point for importer.\"\"\"\n args = importer.handle_args()\n dataset = Dataset('ro', 'ro', RoRo)\n dataset.data_files = {'counties': 'romania_counties.json'}\n importer.main(args, dataset)\n", "step-5": "from Monument import Monument, Dataset\nimport importer_utils as utils\nimport importer as importer\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files[\"counties\"]\n self.set_from_dict_match(counties, \"iso_code\",\n \"judetul_iso\", \"located_adm\")\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. 
Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute(\"localitate\"):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia(\"ro\", loc_link.title)\n adm_item = self.get_statement_values(\"located_adm\")\n if loc_item and loc_item != adm_item[0]:\n self.add_statement(\"location\", loc_item)\n\n if not loc_item:\n self.add_to_report(\"localitate\", self.localitate, \"location\")\n\n def set_heritage_id(self):\n self.add_statement(\"romanian_monument_id\", self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files[\"counties\"]\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code,\n return_content_of=\"itemLabel\",\n search_in=\"iso_code\")\n if len(county_item) == 1:\n place_name = \"{}, Romania\".format(county_item[0])\n else:\n place_name = \"Romania\"\n desc = \"heritage site in {}\".format(place_name)\n self.add_description(\"en\", desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = (\"piața\", \"str.\", \"bd.\")\n if self.has_non_empty_attribute(\"adresa\"):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute(\"localitate\"):\n town = utils.remove_markup(self.localitate)\n adr_nice = \"{}, {}\".format(adr_nice, town)\n self.add_statement(\"located_street\", adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement(\"directions\", directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label(\"ro\", romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping,\n data_files, existing, repository)\n self.set_monuments_all_id(\"cod\")\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image(\"imagine\")\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\nif __name__ == \"__main__\":\n \"\"\"Command line entry point for importer.\"\"\"\n args = importer.handle_args()\n dataset = Dataset(\"ro\", \"ro\", RoRo)\n dataset.data_files = {\"counties\": \"romania_counties.json\"}\n importer.main(args, dataset)\n", "step-ids": [ 4, 5, 8, 9, 11 ] }
[ 4, 5, 8, 9, 11 ]
""" Simulator contains the tools needed to set up a multilayer antireflection coating simulation. Based on transfer matrix method outlined in Hou, H.S. 1974. """ # Author: Andrew Nadolski (with lots of help from previous work by Colin Merkel, # Steve Byrnes, and Aritoki Suzuki) # Filename: simulator.py import glob import os import pprint import time import materials as mats import numpy as np import scipy as sp class Layer: """A layer in the AR coating. Attributes ---------- name : string The name of the material comprising the layer. Default is 'Generic layer' thickness : float The thickness of the layer material. Default is 5 mil. type : string The type of layer. Default is `Layer`, which is an element of the AR coating. Other acceptable types are `Source` and `Terminator`. dielectric : float The dielectric constant of the layer material. Default is 1. losstangent : float The loss tangent of the material. Default is 0. """ def __init__(self): self.name = 'Generic layer' self.thickness = 5. self.type = 'Layer' self.units = 'mil' self.dielectric = 1. self.losstangent = 0. def __repr__(self): """Return a nice string formatted representation of the layer.""" return '{} (AR layer)'.format(self.name) def display_layer_parameters(self): """Display the attributes of the layer.""" pprint.pprint(vars(self)) return def get_index(self): """Return the refractive index of the layer.""" return (np.sqrt(self.dielectric)) def ideal_thickness(self, opt_freq=160e9): """Return the ideal quarter wavelength thickness of the AR coating layer at a given optimization frequency. Arguments --------- opt_freq : float, optional The optimization frequency (in Hz) for the layers thickness. Defaults to 160 GHz. """ return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq)) class SourceLayer(Layer): """A special case of ``Layer``; represents the layer from which the simulated wave emanates. Attributes ---------- thickness : float The thickness of the source layer. Defaults to ``numpy.inf`` since the model doesn't care about the thickness of source layer. The thickness of the source layer should not be changed under normal operations. type : string The type of layer. Default is `Source`, which is an element of the model, but not the coating. Other acceptable types are `Layer` and `Terminator`. """ def __init__(self): Layer.__init__(self) self.thickness = np.inf self.type = 'Source' def __repr__(self): """Return a nice string formatted representation of the layer.""" return '{} (source layer)'.format(self.name) class SubstrateLayer(Layer): """A special case of ``Layer``; represents the layer to which the AR coating is attached. Attributes ---------- thickness : float The thickness of the substrate layer. Defaults to 250 mils, which is the typical thickness of a sample puck used in the Berkeley FTS setup. This may be changed as is necessary, but the units must (eventually) be converted to meters before being fed to the simulator. type : string The type of layer """ def __init__(self): Layer.__init__(self) self.thickness = 250. self.type = 'Substrate' def __repr__(self): return '{} (substrate)'.format(self.name) class TerminatorLayer(Layer): """A special case of ``Layer``; represents the layer upon which the simulated wave terminates. Attributes ---------- thickness : float The thickness of the terminating layer. Defaults to ``numpy.inf`` since the model doesn't care about the thickness of the terminating layer. The thickness of the terminating layer should not be changed under normal operations. type : string The type of layer. 
Default is `Terminator`, which is an element of the model, but not the coating. Other acceptable types are `Source` and `Layer`. """ def __init__(self): Layer.__init__(self) self.thickness = np.inf self.type = 'Terminator' def __repr__(self): """Return a nice string formatted representation of the layer.""" return '{} (terminator layer)'.format(self.name) class Builder: """The main body of the simulator code. Attributes ---------- bands : list A list of n tuples, with each tuple composed of a lower and upper limit for a frequency band in units of hertz. Default is the SPT-3G bands. freq_sweep : array The range of frequencies to be simulated. Defaults to 0. Set a frequency sweep by calling ``set_freq_sweep()``. optimization_frequency : float The frequency (in Hz) at which to calculate the ideal thickness for a given material. Defaults to 160e9 Hz (160 GHz). save_name : string The name under which the results of the simulation are saved. Defaults to 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid overwriting previous simulation results. save_path : string The path to which the simulation results will be saved. Defaults to the current working directory. source : object ``Layer`` object ``SourceLayer`` that defines where the wave emanates from. Default is `None`. stack : list The user-defined layers incorporated in the simulation EXCEPT the source and terminator layers. Default is empty list. structure : list The layers incorporated in the simulation INCLUDING the source and terminator layers. Default is empty list. The list is populated by creating layers and calling ``_interconnect()``. terminator : object ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates. Defaults is `None`. """ def __init__(self): self.bands = [(81.7e9, 107.5e9),(128.6e9, 167.2e9),(196.9e9, 249.2e9)] self.freq_sweep = 0. self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time())) self.optimization_frequency = 160e9 # given in Hz, i.e. 160 GHz self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time())) self.save_path = '.' self.source = None self.stack = [] self.structure = [] self.terminator = None def _calc_R_T_amp(self, polarization, n, delta): """Calculate the reflected and transmitted amplitudes Arguments --------- polarization : string The polarization of the source wave. Must be one of: 's', 'p', or 'u'. 
n : array An array of refractive indices, ordered from source to terminator delta : array An array of wavevector offsets Returns ------- (r, t) : tuple A tuple where 'r' is the reflected amplitude, and 't' is the transmitted amplitude """ t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex) r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex) # # debugging statement # print("\nr_amp is:") # for i in range(len(self.structure)): # for j in range(len(self.structure)): # print("{}{} {}".format(i,j,r_amp[i][j])) # # debugging statement # print("\nt_amp is:") # for i in range(len(self.structure)): # for j in range(len(self.structure)): # print("{}{} {}".format(i,j,t_amp[i][j])) for i in range(len(self.structure)-1): t_amp[i,i+1] = self._t_at_interface(polarization, n[i], n[i+1]) r_amp[i,i+1] = self._r_at_interface(polarization, n[i], n[i+1]) # # debugging statement # print("\nmod r_amp is:") # for i in range(len(self.structure)): # for j in range(len(self.structure)): # print("{}{} {}".format(i,j,r_amp[i][j])) # # debugging statement # print("\nmod t_amp is:") # for i in range(len(self.structure)): # for j in range(len(self.structure)): # print("{}{} {}".format(i,j,t_amp[i][j])) M = np.zeros((len(self.structure),2,2),dtype=complex) # # debugging statement # print("\nThe 'M' matrix is:") # for i in range(len(self.structure)): # for j in range(2): # for k in range(2): # print("M{}{}{} ---> {}".format(i,j,k,M[i][j][k])) m_r_amp = np.zeros((len(self.structure),2,2), dtype=complex) m_t_amp = np.zeros((len(self.structure),2,2), dtype=complex) for i in range(1,len(self.structure)-1): m_t_amp[i] = self._make_2x2(np.exp(-1j*delta[i]), 0., 0., np.exp(1j*delta[i]), dtype=complex) m_r_amp[i] = self._make_2x2(1., r_amp[i,i+1], r_amp[i,i+1], 1., dtype=complex) # # debugging statement # print("\nThe temporary 'm_r_amp' matrix is:") # for i in range(len(self.structure)): # for j in range(2): # for k in range(2): # print("m_r_amp{}{}{} ---> {}".format(i,j,k,m_r_amp[i][j][k])) # # debugging statement # print("\nThe temporary 'm_t_amp' matrix is:") # for i in range(len(self.structure)): # for j in range(2): # for k in range(2): # print("m_t_amp{}{}{} ---> {}".format(i,j,k,m_t_amp[i][j][k])) m_temp = np.dot(m_t_amp, m_r_amp) # # debugging statement # print("\nThe 'm_temp' matrix is:") # for i in m_temp: # print i # for i in range(len(self.structure)): # for j in range(2): # for k in range(2): # print("m_temp{}{}{} ---> {}".format(i,j,k,m_temp[i][j][k])) for i in range(1,len(self.structure)-1): M[i] = 1/t_amp[i,i+1] * np.dot(self._make_2x2(np.exp(-1j*delta[i]), 0., 0., np.exp(1j*delta[i]), dtype=complex), self._make_2x2(1., r_amp[i,i+1], \ r_amp[i,i+1], 1., \ dtype=complex)) # # debugging statement # print("\nThe modified 'M' matrix is:") # for i in range(len(self.structure)): # for j in range(2): # for k in range(2): # print("mod M{}{}{} ---> {}".format(i,j,k,M[i][j][k])) M_prime = self._make_2x2(1., 0., 0., 1., dtype=complex) # # debugging statement # print("\nThe first modified 'M_prime' matrix is:") # for i in range(2): # for j in range(2): # print("1st mod M_prime{}{} ---> {}".format(i,j,M_prime[i][j])) for i in range(1, len(self.structure)-1): # print("\n'M_prime' #{} is:\n{}".format(i,M_prime)) M_prime = np.dot(M_prime, M[i]) # # debugging statement # print("\nThe second modified 'M_prime' matrix is:") # for i in range(2): # for j in range(2): # print("2nd mod M_prime{}{} ---> {}".format(i,j,M_prime[i][j])) # print("\nr_amp01 is ---> {}".format(r_amp[0,1])) # 
print("t_amp01 is ---> {}".format(t_amp[0,1])) mod_M_prime = self._make_2x2(1.,r_amp[0,1], r_amp[0,1], 1., dtype=complex)/t_amp[0,1] # # debugging statement # print("\nThe third modified 'M_prime' matrix is:") # for i in range(2): # for j in range(2): # print("3rd mod M_prime{}{} ---> {}".format(i, j, mod_M_prime[i][j])) M_prime = np.dot(self._make_2x2(1., r_amp[0,1], r_amp[0,1], 1., \ dtype=complex)/t_amp[0,1], M_prime) # # debugging statement # print("\nThe 'M_final' matrix is:") # for i in range(2): # for j in range(2): # print("M_final{}{} ---> {}".format(i, j, M_prime[i][j])) t = 1/M_prime[0,0] r = M_prime[0,1]/M_prime[0,0] # # debugging statement # print("\n't' ---> {}".format(t)) # print("'r' ---> {}".format(r)) return (r, t) def _d_converter(self): """Check the units of all elements in the connected ar coating stack. Convert the lengths of the layers to meters if they are not already in meters. """ units = {'um':1e-6, 'mm':1e-3, 'inch':2.54e-2, 'in':2.54e-2,\ 'micron':1e-6, 'mil':2.54e-5, 'm':1.0} for i in self.stack: i.thickness = i.thickness*units[i.units] return def _find_ks(self, n, frequency, tan, lossy=True): """Calculate the wavenumbers. Arguments --------- n : array An array of refractive indices, ordered from source to terminator frequency : float The frequency at which to calculate the wavevector, k tan : array An array of loss tangents, ordered from vacuum to substrate lossy : boolean, optional If `True` the wavevector will be found for a lossy material. If `False` the wavevector will be found for lossless material. Default is `True`. Returns ------- k : complex The complex wavenumber, k """ if lossy: k = 2*np.pi*n*frequency*(1+0.5j*tan)/3e8 # New expression for loss (as of 9/13/16), this one is more physical (i.e. subtractive) # k = 2*np.pi*n*frequency*(1-0.5j*tan)/3e8 # Original expression for loss (pre 9/13/16), but it is incorrectly ADDITIVE else: k = 2*np.pi*n*frequency/3e8 return k def _find_k_offsets(self, k, d): """Calculate the wavenumber offset, delta. Arguments --------- k : array The wavevector d : array An array of thicknesses, ordered from source to terminator Returns ------- delta : array The wavenumber offset """ olderr = sp.seterr(invalid= 'ignore') # turn off 'invalid multiplication' error; # it's just the 'inf' boundaries delta = k * d sp.seterr(**olderr) # turn the error back on return delta def _get_R(self, net_r_amp): """Return fraction of reflected power. Arguments --------- net_r_amp : float The net reflection amplitude after calculating the transfer matrix. """ return np.abs(net_r_amp)**2 def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0., theta_f=0.): """Return the fraction of transmitted power. Arguments --------- polarization : string The polarization of the source wave. One of: 's' or 'p'. net_t_amp : float The net transmission amplitude after calculating the transfer matrix. n_i : float The index of refraction of material 'i'. n_f : float The index of refraction of material 'f'. theta_i : float, optional The angle of incidence at interface 'i'. Default is 0. theta_f : float, optional The angle of incidence at interface 'f'. Default is 0. 
""" if (polarization=='s'): return np.abs(net_t_amp**2) * (n_f/n_i) elif (polarization=='p'): return np.abs(net_t_amp**2) * (n_f/n_i) else: raise ValueError("Polarization must be 's' or 'p'") def _get_bandpass_stats(self): mean = [] for band in self.bands: pass pass def _interconnect(self): """Connect all the AR coating layer objects, ensuring that the source and terminator layers come first and last, respectively. """ self.clear_structure() self.structure.append(self.source) for i in range(len(self.stack)): self.structure.append(self.stack[i]) self.structure.append(self.terminator) return def _make_2x2(self, A11, A12, A21, A22, dtype=float): """Return a 2x2 array quickly. Arguments --------- A11 : float Array element [0,0]. A12 : float Array element [0,1]. A21 : float Array element [1,0]. A22 : float Array element [1,1]. dtype : dtype, optional The datatype of the array. Defaults to float. """ array = np.empty((2,2), dtype=dtype) array[0,0] = A11 array[0,1] = A12 array[1,0] = A21 array[1,1] = A22 return array def _make_log(self): pass def _make_save_path(self, save_path, save_name): """Assemble the file name and path to the results file. Returns ------- path : string The full path to the save destination for the simulation results """ if save_name.endswith('.txt'): path = os.path.join(save_path, save_name) else: self.save_name = save_name+'.txt' path = os.path.join(save_path, save_name) return path def _r_at_interface(self, polarization, n_1, n_2): """Calculate the reflected amplitude at an interface. Arguments --------- polarization : string The polarization of the source wave. Must be one of: 's' or 'p'. n_1 : float The index of refraction of the first material. n_2 : float The index of refraction of the second material. Returns ------- reflected amplitude : float The amplitude of the reflected power """ if polarization == 's': return ((n_1-n_2)/(n_1+n_2)) elif polarization == 'p': return ((n_1-n_2)/(n_1+n_2)) else: raise ValueError("Polarization must be 's' or 'p'") def _sort_ns(self): """Organize the refractive indices of the layers in the simulation. Returns ------- n : array The ordered list of indices of refraction, from source to terminator """ n = [] for layer in self.structure: n.append(layer.get_index()) n = np.asarray(n) return n def _sort_ds(self): """Organize the layers' thicknesses in the simulation. Returns ------- d : array The ordered list of thicknesses, from source to terminator """ d = [] for layer in self.structure: if (layer.type == 'Layer' or layer.type == 'Substrate'): d.append(layer.thickness) d.insert(0, self.structure[0].thickness) d.append(self.structure[-1].thickness) d = np.asarray(d) return d def _sort_tans(self): """Organize the loss tangents of the layers in the simulation. Returns ------- tan : array The ordered list of loss tangents, from source to terminator """ tan = [] for layer in self.structure: tan.append(layer.losstangent) tan = np.asarray(tan) return tan def _t_at_interface(self, polarization, n_1, n_2): """Calculate the transmission amplitude at an interface. Arguments --------- polarization : string The polarization of the source wave. Must be one of: 's' or 'p'. n_1 : float The index of refraction of the first material. n_2 : float The index of refraction of the second material. 
Returns ------- transmitted_amplitude : float The amplitude of the transmitted power """ if polarization == 's': return 2*n_1/(n_1 + n_2) elif polarization == 'p': return 2*n_1/(n_1 + n_2) else: raise ValueError("Polarization must be 's' or 'p'") def _unpolarized_simulation(self, frequency, theta_0=0): """Handle the special case of unpolarized light by running the model for both 's' and 'p' polarizations and computing the mean of the two results. Arguments --------- frequency : float The frequency (in Hz) at which to evaluate the model. theta_0 : float, optional The angle of incidence at the initial interface. Default is 0. """ s_data = self.simulate(frequency, 's', theta_0) p_data = self.simulate(frequency, 'p', theta_0) T = (s_data + p_data)/2 return T def add_layer(self, material, thickness=5.0, units='mil', type='layer', \ stack_position=-1): """Create a layer from the set of pre-programmed materials and add it to the AR coating stack Arguments --------- material : string A key in the dictionary of materials found in materials.py. You can view these materials by calling 'show_materials()'. thickness : float, optional The thickness of the AR coating layer material. Assumed to be given in 'mil' (i.e. thousandths of an inch) unless otherwise stated. Default is 5. units : string, optional The units of length for the AR coating layer. Default is 'mil'. Must be one of: { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' } type : string, optional The layer type. Default is 'layer', which corresponds to an AR layer. Other options are 'source' or 'terminator', which correspond to source and terminator layers, respectively. stack_position : int, optional The position of the layer in the AR coating stack, indexed from 0. Default is -1 (i.e., layer is automatically added to the end (bottom?) of the stack. """ type = type.lower() if type == 'layer': layer = Layer() layer.name = material.lower() layer.thickness = thickness layer.units = units try: # layer.dielectric = mats.Electrical.DIELECTRIC[layer.name] layer.dielectric = mats.Electrical.props[layer.name][0] except: raise KeyError('I don\'t know that material!') try: # layer.losstangent = mats.Electrical.LOSS_TAN[layer.name] layer.losstangent = mats.Electrical.props[layer.name][1] except: layer.losstangent = 0 print('\nI don\'t know this loss tangent. Setting loss to 0!') if (stack_position == -1): self.stack.append(layer) else: self.stack.insert(stack_position, layer) elif type == 'source': self.source = SourceLayer() self.source.name = material.lower() try: # self.source.dielectric = mats.Electrical.DIELECTRIC[self.source.name] self.source.dielectric = mats.Electrical.props[self.source.name][0] except: raise KeyError('I don\'t know that material!') try: # self.source.losstangent = mats.Electrical.LOSS_TAN[self.source.name] self.source.losstangent = mats.Electrical.props[self.source.name][1] except: self.source.losstangent = 0 print('\nI don\'t know this loss tangent. Setting loss to 0!') elif type == 'terminator': self.terminator = TerminatorLayer() self.terminator.name = material.lower() try: # self.terminator.dielectric = mats.Electrical.DIELECTRIC[self.terminator.name] self.terminator.dielectric = mats.Electrical.props[self.terminator.name][0] except: raise KeyError('I don\'t know that material!') try: # self.terminator.losstangent = mats.Electrical.LOSS_TAN[self.terminator.name] self.terminator.losstangent = mats.Electrical.props[self.terminator.name][1] except: self.terminator.losstangent = 0 print('\nI don\'t know this loss tangent. 
Setting loss to 0!') else: raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR') return def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1): """Add a layer with custom properties to the AR stack. Arguments --------- material : string The name of the layer thickness : float The thickness of the layer units : string The units of length for the AR coating layer. Must be one of: { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' } dielectric : float The dielectric constant of the AR coating layer loss_tangent : float The loss tangent of the AR coating layer stack_position : int, optional The position of the layer in the AR coating stack, indexed from 0. Default is -1 (i.e., layer is automatically added to the end (bottom?) of the stack. """ layer = Layer() layer.units = units layer.thickness = thickness layer.dielectric = dielectric layer.losstangent = loss_tangent if (stack_position == -1): self.stack.append(layer) else: self.stack.insert(stack_position, layer) return def display_sim_parameters(self): """Display all the simulation parameters in one place.""" pprint.pprint(vars(self)) return def clear_structure(self): """Remove all elements from the current AR ``structure``.""" self.structure = [] return def remove_layer(self, layer_pos): """Remove the specified layer from the AR coating stack. Arguments --------- layer_pos : int The list index of the layer to remove from the AR coating stack """ self.stack.pop(layer_pos) return def run_sim(self): """Take the attributes of the ``Builder()`` object and execute the simulation at each frequency in ``Builder().freq_sweep``. Save the output to a columnized, tab-separated text file. Returns ------- transmission : array A three-element array. The first element is a list of frequencies, the second elements is a list of the transmissions at each frequency, and the third is a list of the reflections at each frequency. """ t0 = time.time() print('Beginning AR coating simulation') self._d_converter() self._interconnect() f_list = [] t_list = [] r_list = [] for f in self.freq_sweep: results = self.sim_single_freq(f) f_list.append(f) t_list.append(results['T']) r_list.append(results['R']) fs = np.asarray(f_list) ts = np.asarray(t_list) rs = np.asarray(r_list) results = np.array([fs, ts, rs]) t = time.ctime(time.time()) data_name = self._make_save_path(self.save_path, self.save_name) header = 'Frequency (Hz)\t\tTransmission amplitude\t\tReflection amplitude' # log_name = self._make_save_path(self.save_path, self.log_name) # log = self._make_log() with open(data_name, 'wb') as f: np.savetxt(f, np.c_[fs, ts, rs], delimiter='\t', header=header) # with open(log_name, 'wb') as f: # for line in log: # f.writelines(line) # f.write('\n') print('Finished running AR coating simulation') t1 = time.time() t_elapsed = t1-t0 print('Elapsed time: {t}s\n'.format(t=t_elapsed)) return results def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units='ghz'): """Set the frequency range over which the simulation will run. Arguments --------- lower_bound : float The low end of the frequency range, given in GHz. upper_bound : float The high end of the frequency range, given in GHz. reolution : float, optional The interval at which to sample the frequency range, given in GHz. Defaults to 1 GHz. units : str The units of frequency. 
Must be one of: Hz, hz, KHz, khz, MHz, mhz, GHz, ghz """ convert = {'Hz':1.0, 'hz':1.0, 'KHz':1e3, 'khz':1e3, 'MHz':1e6, 'mhz':1e6, 'GHz':1e9, 'ghz':1e9} low = lower_bound*convert[units] high = upper_bound*convert[units] samples = (high-low)/resolution self.freq_sweep = np.linspace(low, high, samples) return # def set_source_layer(self, material): # """Change the source layer. # Arguments # --------- # material : string # A key in the dielectrics dictionary. # """ # self.source = SourceLayer(material) # return # def set_terminator_layer(self, material): # """Change the terminator layer. # Arguments # --------- # material : string # A key in the dielectrics dictionary. # """ # self.terminator = TerminatorLayer(material) # return def show_materials(self): """List the materials with known properties. The listed material names are keys in the materials properties dictionary. """ print('\nThe materials with known dielectric properties are:\n') pprint.pprint(mats.Electrical.props) # pprint.pprint(mats.Electrical.DIELECTRIC) print('\nThe materials with known loss tangents are:\n') pprint.pprint(mats.Electrical.props) # pprint.pprint(mats.Electrical.LOSS_TAN) return def sim_single_freq(self, frequency, polarization='s', theta_0=0): """Run the model simulation for a single frequency. Arguments --------- frequency : float The frequency at which to evaluate the model (in Hz). polarization : string, optional The polarization of the source wave. Must be one of: 's', 'p', or 'u'. Default is 's'. ### NOTE ### I've chosen 's' polarization as the default because this simulator only handles normal incidence waves, and and at normal incidence 's' and 'p' are equivalent. theta_0 : float, optional The angle of incidence at the first interface. Returns ------- result : dict dict = { 'T' : array; the total transmission through the model. 'R' : array; the total reflection through the model. } """ # check the desired polarization # if polarization == 'u': # return self._unpolarized_simulation(frequency) n = self._sort_ns() # get all refractive indices d = self._sort_ds() # get all thicknesses tan = self._sort_tans() # get all loss tans k = self._find_ks(n, frequency, tan) # find all wavevectors, k delta = self._find_k_offsets(k, d) # calculate all offsets r, t = self._calc_R_T_amp(polarization, n, delta) # get trans, ref amps T = self._get_T(polarization, t, n[0], n[-1]) # find net trans, ref power R = self._get_R(r) result = {'T':T, 'R':R} return result def snell(self, indices, theta_0): """Caclulate the Snell angles for the entire model. Arguments --------- indices : list The list of indices of refraction for all elements in the model, ordered from source to terminator. theta_0 : float The angle of incidence at the first interface. """ return sp.arcsin(np.real_if_close(n_list[0]*np.sin(th_0) / n_list)) class MCMC: """Contains the methods specific to ``emcee``, the MCMC Hammer, and helper methods to set up MCMC simulations and visualize the results. """ def __init__(self): self.name = 'blah' self.priors = [] def __repr__(self): return '{} (MCMC object)'.format(self.name) def add_prior(self, layer_number, prior_type, low_bound, hi_bound, units='mil'): """Add a prior to a part of the model in order to constrain the total simulation space. Can only place constraints on thickness and dielectric for now. Arguments --------- layer_number : int The position of the layer in the AR coating stack. Indexed from 1, so incident `vacuum` is 0 and first AR coating layer is 1. 
prior_type : string Flags the prior as either a cut to dielectric constant or thickness. One of 'thickness', 't', 'dielectric', or 'd'. low_bound : float The lower boundary of the range. hi_bound : float The higher boundary of the range. units : string, optional The units of the lower and upper bounds. Only applies to 'thickness' cuts because dielectric constants are unitless. Defaults to `mils`. """ prior = {'layer_number':layer_number, 'prior_type':prior_type, \ 'low_bound':low_bound, 'hi_bound':hi_bound, 'units':units} self.priors.append(prior) return def lnlikelihood(self): return def lnprior(self): """Define the known prior attributes of the model in order to constrain the simulation space. """ return def lnprobability(self): """The logspace sum of ``lnprior`` and ``lnlikelihood``. """ return def sort_priors(self): """Sort the contents of ``self.prior`` by layer number Returns ------- sorted_priors : list A list of priors sorted by layer number. If a layer has both thickness and dielectric priors, the thickness dielectric is first and the dielectric is second. """ return
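The Builder class in the row above is driven through add_layer(), set_freq_sweep(), and run_sim(). The short sketch below illustrates that call sequence. It is an illustrative sketch only, not part of the dataset row: the import name follows the "# Filename: simulator.py" comment, and the material keys ('vacuum', 'epoxy', 'silicon') are placeholders, so call show_materials() to see what materials.py actually defines.

from simulator import Builder   # module name taken from the '# Filename: simulator.py' comment

ar = Builder()
ar.show_materials()                        # print the material keys known to materials.py

# The model needs a source layer, at least one AR coating layer, and a terminator.
ar.add_layer('vacuum', type='source')               # placeholder key: wave emanates from here
ar.add_layer('epoxy', thickness=5.0, units='mil')   # placeholder key: one AR coating layer
ar.add_layer('silicon', type='terminator')          # placeholder key: wave terminates here

# set_freq_sweep() converts the bounds to Hz but divides the span by `resolution` as given,
# so pass the step in Hz even though the docstring quotes GHz. Newer numpy versions may also
# require the resulting sample count to be cast to int inside set_freq_sweep().
ar.set_freq_sweep(50, 300, resolution=1e9, units='ghz')

freqs, transmission, reflection = ar.run_sim()   # also writes a tab-separated .txt results file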
normal
{ "blob_id": "a2292bc9cee57c5d4a7d36c66510ce4b4f3e20da", "index": 3687, "step-1": "<mask token>\n\n\nclass SubstrateLayer(Layer):\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. Default is empty list.\n structure : list\n The layers incorporated in the simulation INCLUDING the source and\n terminator layers. Default is empty list. The list is populated \n by creating layers and calling ``_interconnect()``.\n terminator : object\n ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n Defaults is `None`.\n \"\"\"\n\n def __init__(self):\n self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, \n 167200000000.0), (196900000000.0, 249200000000.0)]\n self.freq_sweep = 0.0\n self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.\n time()))\n self.optimization_frequency = 160000000000.0\n self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(\n time.time()))\n self.save_path = '.'\n self.source = None\n self.stack = []\n self.structure = []\n self.terminator = None\n\n def _calc_R_T_amp(self, polarization, n, delta):\n \"\"\"Calculate the reflected and transmitted amplitudes\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
Must be one of: 's', 'p', or 'u'.\n n : array\n An array of refractive indices, ordered from source to terminator\n delta : array\n An array of wavevector offsets\n \n Returns\n -------\n (r, t) : tuple\n A tuple where 'r' is the reflected amplitude, and 't' is the\n transmitted amplitude\n \"\"\"\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n for i in range(len(self.structure) - 1):\n t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1]\n )\n r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1]\n )\n M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_r_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n for i in range(1, len(self.structure) - 1):\n m_t_amp[i] = self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0,\n np.exp(1.0j * delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + \n 1], 1.0, dtype=complex)\n m_temp = np.dot(m_t_amp, m_r_amp)\n for i in range(1, len(self.structure) - 1):\n M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j *\n delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex\n ), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], \n 1.0, dtype=complex))\n M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n for i in range(1, len(self.structure) - 1):\n M_prime = np.dot(M_prime, M[i])\n mod_M_prime = self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1]\n M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1], M_prime)\n t = 1 / M_prime[0, 0]\n r = M_prime[0, 1] / M_prime[0, 0]\n return r, t\n\n def _d_converter(self):\n \"\"\"Check the units of all elements in the connected ar coating\n stack. 
Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = (\n 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude')\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1 - t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units=\n 'ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0,\n 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz':\n 1000000000.0}\n low = lower_bound * convert[units]\n high = upper_bound * convert[units]\n samples = (high - low) / resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. 
\n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n n = self._sort_ns()\n d = self._sort_ds()\n tan = self._sort_tans()\n k = self._find_ks(n, frequency, tan)\n delta = self._find_k_offsets(k, d)\n r, t = self._calc_R_T_amp(polarization, n, delta)\n T = self._get_T(polarization, t, n[0], n[-1])\n R = self._get_R(r)\n result = {'T': T, 'R': R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0] * np.sin(th_0) / n_list))\n\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number': layer_number, 'prior_type': prior_type,\n 'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. 
If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n", "step-2": "<mask token>\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.0\n self.type = 'Substrate'\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. 
Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. Default is empty list.\n structure : list\n The layers incorporated in the simulation INCLUDING the source and\n terminator layers. Default is empty list. The list is populated \n by creating layers and calling ``_interconnect()``.\n terminator : object\n ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n Defaults is `None`.\n \"\"\"\n\n def __init__(self):\n self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, \n 167200000000.0), (196900000000.0, 249200000000.0)]\n self.freq_sweep = 0.0\n self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.\n time()))\n self.optimization_frequency = 160000000000.0\n self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(\n time.time()))\n self.save_path = '.'\n self.source = None\n self.stack = []\n self.structure = []\n self.terminator = None\n\n def _calc_R_T_amp(self, polarization, n, delta):\n \"\"\"Calculate the reflected and transmitted amplitudes\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's', 'p', or 'u'.\n n : array\n An array of refractive indices, ordered from source to terminator\n delta : array\n An array of wavevector offsets\n \n Returns\n -------\n (r, t) : tuple\n A tuple where 'r' is the reflected amplitude, and 't' is the\n transmitted amplitude\n \"\"\"\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n for i in range(len(self.structure) - 1):\n t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1]\n )\n r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1]\n )\n M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_r_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n for i in range(1, len(self.structure) - 1):\n m_t_amp[i] = self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0,\n np.exp(1.0j * delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + \n 1], 1.0, dtype=complex)\n m_temp = np.dot(m_t_amp, m_r_amp)\n for i in range(1, len(self.structure) - 1):\n M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j *\n delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex\n ), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], \n 1.0, dtype=complex))\n M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n for i in range(1, len(self.structure) - 1):\n M_prime = np.dot(M_prime, M[i])\n mod_M_prime = self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1]\n M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1], M_prime)\n t = 1 / M_prime[0, 0]\n r = M_prime[0, 1] / M_prime[0, 0]\n return r, t\n\n def _d_converter(self):\n \"\"\"Check the units of all elements in the connected ar coating\n stack. 
Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = (\n 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude')\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1 - t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units=\n 'ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0,\n 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz':\n 1000000000.0}\n low = lower_bound * convert[units]\n high = upper_bound * convert[units]\n samples = (high - low) / resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. 
\n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n n = self._sort_ns()\n d = self._sort_ds()\n tan = self._sort_tans()\n k = self._find_ks(n, frequency, tan)\n delta = self._find_k_offsets(k, d)\n r, t = self._calc_R_T_amp(polarization, n, delta)\n T = self._get_T(polarization, t, n[0], n[-1])\n R = self._get_R(r)\n result = {'T': T, 'R': R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0] * np.sin(th_0) / n_list))\n\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number': layer_number, 'prior_type': prior_type,\n 'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. 
If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n", "step-3": "<mask token>\n\n\nclass Layer:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_index(self):\n \"\"\"Return the refractive index of the layer.\"\"\"\n return np.sqrt(self.dielectric)\n <mask token>\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.0\n self.type = 'Substrate'\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. 
Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. Default is empty list.\n structure : list\n The layers incorporated in the simulation INCLUDING the source and\n terminator layers. Default is empty list. The list is populated \n by creating layers and calling ``_interconnect()``.\n terminator : object\n ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n Defaults is `None`.\n \"\"\"\n\n def __init__(self):\n self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, \n 167200000000.0), (196900000000.0, 249200000000.0)]\n self.freq_sweep = 0.0\n self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.\n time()))\n self.optimization_frequency = 160000000000.0\n self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(\n time.time()))\n self.save_path = '.'\n self.source = None\n self.stack = []\n self.structure = []\n self.terminator = None\n\n def _calc_R_T_amp(self, polarization, n, delta):\n \"\"\"Calculate the reflected and transmitted amplitudes\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's', 'p', or 'u'.\n n : array\n An array of refractive indices, ordered from source to terminator\n delta : array\n An array of wavevector offsets\n \n Returns\n -------\n (r, t) : tuple\n A tuple where 'r' is the reflected amplitude, and 't' is the\n transmitted amplitude\n \"\"\"\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n for i in range(len(self.structure) - 1):\n t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1]\n )\n r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1]\n )\n M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_r_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n for i in range(1, len(self.structure) - 1):\n m_t_amp[i] = self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0,\n np.exp(1.0j * delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + \n 1], 1.0, dtype=complex)\n m_temp = np.dot(m_t_amp, m_r_amp)\n for i in range(1, len(self.structure) - 1):\n M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j *\n delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex\n ), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], \n 1.0, dtype=complex))\n M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n for i in range(1, len(self.structure) - 1):\n M_prime = np.dot(M_prime, M[i])\n mod_M_prime = self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1]\n M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1], M_prime)\n t = 1 / M_prime[0, 0]\n r = M_prime[0, 1] / M_prime[0, 0]\n return r, t\n\n def _d_converter(self):\n \"\"\"Check the units of all elements in the connected ar coating\n stack. 
Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = (\n 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude')\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1 - t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units=\n 'ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0,\n 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz':\n 1000000000.0}\n low = lower_bound * convert[units]\n high = upper_bound * convert[units]\n samples = (high - low) / resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. 
\n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n n = self._sort_ns()\n d = self._sort_ds()\n tan = self._sort_tans()\n k = self._find_ks(n, frequency, tan)\n delta = self._find_k_offsets(k, d)\n r, t = self._calc_R_T_amp(polarization, n, delta)\n T = self._get_T(polarization, t, n[0], n[-1])\n R = self._get_R(r)\n result = {'T': T, 'R': R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0] * np.sin(th_0) / n_list))\n\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number': layer_number, 'prior_type': prior_type,\n 'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. 
If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n", "step-4": "<mask token>\n\n\nclass Layer:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_index(self):\n \"\"\"Return the refractive index of the layer.\"\"\"\n return np.sqrt(self.dielectric)\n\n def ideal_thickness(self, opt_freq=160000000000.0):\n \"\"\"Return the ideal quarter wavelength thickness of the AR coating layer\n at a given optimization frequency.\n \n Arguments\n ---------\n opt_freq : float, optional\n The optimization frequency (in Hz) for the layers thickness. Defaults \n to 160 GHz.\n \"\"\"\n return 1 / np.sqrt(self.dielectric) * 300000000.0 / (4 * opt_freq)\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.0\n self.type = 'Substrate'\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. 
Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. Default is empty list.\n structure : list\n The layers incorporated in the simulation INCLUDING the source and\n terminator layers. Default is empty list. The list is populated \n by creating layers and calling ``_interconnect()``.\n terminator : object\n ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n Defaults is `None`.\n \"\"\"\n\n def __init__(self):\n self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, \n 167200000000.0), (196900000000.0, 249200000000.0)]\n self.freq_sweep = 0.0\n self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.\n time()))\n self.optimization_frequency = 160000000000.0\n self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(\n time.time()))\n self.save_path = '.'\n self.source = None\n self.stack = []\n self.structure = []\n self.terminator = None\n\n def _calc_R_T_amp(self, polarization, n, delta):\n \"\"\"Calculate the reflected and transmitted amplitudes\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
Must be one of: 's', 'p', or 'u'.\n n : array\n An array of refractive indices, ordered from source to terminator\n delta : array\n An array of wavevector offsets\n \n Returns\n -------\n (r, t) : tuple\n A tuple where 'r' is the reflected amplitude, and 't' is the\n transmitted amplitude\n \"\"\"\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n for i in range(len(self.structure) - 1):\n t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1]\n )\n r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1]\n )\n M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_r_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n for i in range(1, len(self.structure) - 1):\n m_t_amp[i] = self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0,\n np.exp(1.0j * delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + \n 1], 1.0, dtype=complex)\n m_temp = np.dot(m_t_amp, m_r_amp)\n for i in range(1, len(self.structure) - 1):\n M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j *\n delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex\n ), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], \n 1.0, dtype=complex))\n M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n for i in range(1, len(self.structure) - 1):\n M_prime = np.dot(M_prime, M[i])\n mod_M_prime = self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1]\n M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1], M_prime)\n t = 1 / M_prime[0, 0]\n r = M_prime[0, 1] / M_prime[0, 0]\n return r, t\n\n def _d_converter(self):\n \"\"\"Check the units of all elements in the connected ar coating\n stack. 
Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = (\n 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude')\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1 - t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units=\n 'ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0,\n 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz':\n 1000000000.0}\n low = lower_bound * convert[units]\n high = upper_bound * convert[units]\n samples = (high - low) / resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. 
\n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n n = self._sort_ns()\n d = self._sort_ds()\n tan = self._sort_tans()\n k = self._find_ks(n, frequency, tan)\n delta = self._find_k_offsets(k, d)\n r, t = self._calc_R_T_amp(polarization, n, delta)\n T = self._get_T(polarization, t, n[0], n[-1])\n R = self._get_R(r)\n result = {'T': T, 'R': R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0] * np.sin(th_0) / n_list))\n\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number': layer_number, 'prior_type': prior_type,\n 'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. 
If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n", "step-5": "\"\"\"\nSimulator contains the tools needed to set up a multilayer antireflection\ncoating simulation.\n\nBased on transfer matrix method outlined in Hou, H.S. 1974.\n\"\"\"\n\n# Author: Andrew Nadolski (with lots of help from previous work by Colin Merkel,\n# Steve Byrnes, and Aritoki Suzuki)\n# Filename: simulator.py\n\n\nimport glob\nimport os\nimport pprint\nimport time\nimport materials as mats\nimport numpy as np\nimport scipy as sp\n\n\nclass Layer:\n \"\"\"A layer in the AR coating.\n\n Attributes\n ----------\n name : string\n The name of the material comprising the layer. Default is 'Generic layer'\n thickness : float\n The thickness of the layer material. Default is 5 mil.\n type : string\n The type of layer. Default is `Layer`, which is an element of the AR\n coating. Other acceptable types are `Source` and `Terminator`.\n dielectric : float\n The dielectric constant of the layer material. Default is 1.\n losstangent : float\n The loss tangent of the material. Default is 0.\n \"\"\"\n def __init__(self):\n self.name = 'Generic layer'\n self.thickness = 5.\n self.type = 'Layer'\n self.units = 'mil'\n self.dielectric = 1.\n self.losstangent = 0.\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (AR layer)'.format(self.name)\n\n def display_layer_parameters(self):\n \"\"\"Display the attributes of the layer.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def get_index(self):\n \"\"\"Return the refractive index of the layer.\"\"\"\n return (np.sqrt(self.dielectric))\n\n def ideal_thickness(self, opt_freq=160e9):\n \"\"\"Return the ideal quarter wavelength thickness of the AR coating layer\n at a given optimization frequency.\n \n Arguments\n ---------\n opt_freq : float, optional\n The optimization frequency (in Hz) for the layers thickness. Defaults \n to 160 GHz.\n \"\"\"\n return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq))\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. 
Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.\n self.type = 'Substrate'\n \n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. Default is empty list.\n structure : list\n The layers incorporated in the simulation INCLUDING the source and\n terminator layers. Default is empty list. The list is populated \n by creating layers and calling ``_interconnect()``.\n terminator : object\n ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n Defaults is `None`.\n \"\"\"\n def __init__(self):\n self.bands = [(81.7e9, 107.5e9),(128.6e9, 167.2e9),(196.9e9, 249.2e9)]\n self.freq_sweep = 0.\n self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time()))\n self.optimization_frequency = 160e9 # given in Hz, i.e. 160 GHz\n self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time()))\n self.save_path = '.'\n self.source = None\n self.stack = []\n self.structure = []\n self.terminator = None\n\n def _calc_R_T_amp(self, polarization, n, delta):\n \"\"\"Calculate the reflected and transmitted amplitudes\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
Must be one of: 's', 'p', or 'u'.\n n : array\n An array of refractive indices, ordered from source to terminator\n delta : array\n An array of wavevector offsets\n \n Returns\n -------\n (r, t) : tuple\n A tuple where 'r' is the reflected amplitude, and 't' is the\n transmitted amplitude\n \"\"\"\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n# # debugging statement\n# print(\"\\nr_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nt_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n for i in range(len(self.structure)-1):\n t_amp[i,i+1] = self._t_at_interface(polarization, n[i], n[i+1])\n r_amp[i,i+1] = self._r_at_interface(polarization, n[i], n[i+1])\n# # debugging statement\n# print(\"\\nmod r_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nmod t_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n M = np.zeros((len(self.structure),2,2),dtype=complex)\n# # debugging statement\n# print(\"\\nThe 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n m_r_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n for i in range(1,len(self.structure)-1):\n m_t_amp[i] = self._make_2x2(np.exp(-1j*delta[i]), 0., 0., np.exp(1j*delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1., r_amp[i,i+1], r_amp[i,i+1], 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_r_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_r_amp{}{}{} ---> {}\".format(i,j,k,m_r_amp[i][j][k]))\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_t_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_t_amp{}{}{} ---> {}\".format(i,j,k,m_t_amp[i][j][k]))\n\n m_temp = np.dot(m_t_amp, m_r_amp)\n\n# # debugging statement\n# print(\"\\nThe 'm_temp' matrix is:\")\n# for i in m_temp:\n# print i\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_temp{}{}{} ---> {}\".format(i,j,k,m_temp[i][j][k]))\n\n for i in range(1,len(self.structure)-1):\n M[i] = 1/t_amp[i,i+1] * np.dot(self._make_2x2(np.exp(-1j*delta[i]),\n 0., 0., np.exp(1j*delta[i]),\n dtype=complex),\n self._make_2x2(1., r_amp[i,i+1], \\\n r_amp[i,i+1], 1., \\\n dtype=complex))\n# # debugging statement\n# print(\"\\nThe modified 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"mod M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n M_prime = self._make_2x2(1., 0., 0., 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe first modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"1st mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n for i in range(1, len(self.structure)-1):\n# print(\"\\n'M_prime' #{} is:\\n{}\".format(i,M_prime))\n M_prime = np.dot(M_prime, 
M[i])\n\n# # debugging statement\n# print(\"\\nThe second modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"2nd mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n# print(\"\\nr_amp01 is ---> {}\".format(r_amp[0,1]))\n# print(\"t_amp01 is ---> {}\".format(t_amp[0,1]))\n\n mod_M_prime = self._make_2x2(1.,r_amp[0,1], r_amp[0,1], 1., dtype=complex)/t_amp[0,1]\n\n# # debugging statement\n# print(\"\\nThe third modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"3rd mod M_prime{}{} ---> {}\".format(i, j, mod_M_prime[i][j]))\n\n M_prime = np.dot(self._make_2x2(1., r_amp[0,1], r_amp[0,1], 1., \\\n dtype=complex)/t_amp[0,1], M_prime)\n\n# # debugging statement\n# print(\"\\nThe 'M_final' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"M_final{}{} ---> {}\".format(i, j, M_prime[i][j]))\n\n t = 1/M_prime[0,0]\n r = M_prime[0,1]/M_prime[0,0]\n\n# # debugging statement\n# print(\"\\n't' ---> {}\".format(t))\n# print(\"'r' ---> {}\".format(r))\n\n return (r, t)\n\n def _d_converter(self):\n \"\"\"Check the units of all elements in the connected ar coating\n stack. Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um':1e-6, 'mm':1e-3, 'inch':2.54e-2, 'in':2.54e-2,\\\n 'micron':1e-6, 'mil':2.54e-5, 'm':1.0}\n for i in self.stack:\n i.thickness = i.thickness*units[i.units]\n return\n \n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2*np.pi*n*frequency*(1+0.5j*tan)/3e8 # New expression for loss (as of 9/13/16), this one is more physical (i.e. subtractive)\n# k = 2*np.pi*n*frequency*(1-0.5j*tan)/3e8 # Original expression for loss (pre 9/13/16), but it is incorrectly ADDITIVE\n else:\n k = 2*np.pi*n*frequency/3e8\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid= 'ignore') # turn off 'invalid multiplication' error;\n # it's just the 'inf' boundaries\n delta = k * d\n sp.seterr(**olderr) # turn the error back on\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp)**2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0., theta_f=0.):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if (polarization=='s'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n elif (polarization=='p'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. Defaults to float.\n \"\"\"\n array = np.empty((2,2), dtype=dtype)\n array[0,0] = A11\n array[0,1] = A12\n array[1,0] = A21\n array[1,1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name+'.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return ((n_1-n_2)/(n_1+n_2))\n elif polarization == 'p':\n return ((n_1-n_2)/(n_1+n_2))\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if (layer.type == 'Layer' or layer.type == 'Substrate'):\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2*n_1/(n_1 + n_2)\n elif polarization == 'p':\n return 2*n_1/(n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data)/2\n return T\n \n def add_layer(self, material, thickness=5.0, units='mil', type='layer', \\\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. 
Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n# layer.dielectric = mats.Electrical.DIELECTRIC[layer.name]\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# layer.losstangent = mats.Electrical.LOSS_TAN[layer.name]\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n if (stack_position == -1):\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n# self.source.dielectric = mats.Electrical.DIELECTRIC[self.source.name]\n self.source.dielectric = mats.Electrical.props[self.source.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# self.source.losstangent = mats.Electrical.LOSS_TAN[self.source.name]\n self.source.losstangent = mats.Electrical.props[self.source.name][1]\n except:\n self.source.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n# self.terminator.dielectric = mats.Electrical.DIELECTRIC[self.terminator.name]\n self.terminator.dielectric = mats.Electrical.props[self.terminator.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# self.terminator.losstangent = mats.Electrical.LOSS_TAN[self.terminator.name]\n self.terminator.losstangent = mats.Electrical.props[self.terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR')\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if (stack_position == -1):\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude'\n# log_name = self._make_save_path(self.save_path, self.log_name)\n# log = self._make_log()\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n# with open(log_name, 'wb') as f:\n# for line in log:\n# f.writelines(line)\n# f.write('\\n')\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1-t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units='ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. 
Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz':1.0, 'hz':1.0, 'KHz':1e3, 'khz':1e3, 'MHz':1e6,\n 'mhz':1e6, 'GHz':1e9, 'ghz':1e9}\n low = lower_bound*convert[units]\n high = upper_bound*convert[units]\n samples = (high-low)/resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n# def set_source_layer(self, material):\n# \"\"\"Change the source layer.\n\n# Arguments\n# ---------\n# material : string\n# A key in the dielectrics dictionary.\n# \"\"\"\n# self.source = SourceLayer(material)\n# return\n\n# def set_terminator_layer(self, material):\n# \"\"\"Change the terminator layer.\n\n# Arguments\n# ---------\n# material : string\n# A key in the dielectrics dictionary.\n# \"\"\"\n# self.terminator = TerminatorLayer(material)\n# return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. \n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.DIELECTRIC)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.LOSS_TAN)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n # check the desired polarization\n# if polarization == 'u':\n# return self._unpolarized_simulation(frequency)\n n = self._sort_ns() # get all refractive indices\n d = self._sort_ds() # get all thicknesses\n tan = self._sort_tans() # get all loss tans\n k = self._find_ks(n, frequency, tan) # find all wavevectors, k\n delta = self._find_k_offsets(k, d) # calculate all offsets\n r, t = self._calc_R_T_amp(polarization, n, delta) # get trans, ref amps\n T = self._get_T(polarization, t, n[0], n[-1]) # find net trans, ref power\n R = self._get_R(r)\n result = {'T':T, 'R':R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0]*np.sin(th_0) / n_list))\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound, units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. 
Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number':layer_number, 'prior_type':prior_type, \\\n 'low_bound':low_bound, 'hi_bound':hi_bound, 'units':units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n \n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n", "step-ids": [ 45, 51, 53, 54, 60 ] }
[ 45, 51, 53, 54, 60 ]
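
# A minimal, illustrative usage sketch for the Builder class in the simulator code above.
# The material names ('vacuum', 'epoxy', 'silicon') are placeholders: the real keys live in
# materials.py and can be listed with show_materials(). The module name follows the
# "Filename: simulator.py" header in the source.
from simulator import Builder

coating = Builder()
coating.add_layer('vacuum', type='source')                    # wave enters from the source layer
coating.add_layer('epoxy', thickness=5.0, units='mil')        # one AR coating layer
coating.add_layer('silicon', thickness=250.0, units='mil')    # substrate-like layer
coating.add_layer('vacuum', type='terminator')                # wave exits into the terminator
# Note: set_freq_sweep converts the bounds to Hz but uses `resolution` as given,
# so 1 GHz steps are requested as resolution=1e9.
coating.set_freq_sweep(100.0, 300.0, resolution=1e9, units='ghz')
freqs, transmission, reflection = coating.run_sim()           # also writes a tab-separated .txt file
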
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 10 17:48:19 2021

@author: LESLY
"""

from PICO_PLACA_class import PICO_PLACA

""" Main program of "Pico y Placa" predictor"""
def main():
    print("Predictor")

    # Collect the license plate, date and time from the user.
    placa = input("Enter the license of your vehicle in the following format AAA-####: ")
    fecha = input("Enter the date in the following format AA/MM/DD: ")
    hora = input("Enter the time in the following format 00:00: ")

    prog = PICO_PLACA(placa, fecha, hora)
    estado = prog.verificar()

    if estado == "continue":
        # Input passed verification; run the actual prediction.
        estado = prog.validar()
        print("Your vehicle " + estado)
    else:
        # Verification failed; show the message returned by verificar().
        print(estado)


if __name__ == '__main__':
    main()
normal
{ "blob_id": "c7e5851a41e1cdb33cd0daa103fbf702da6e5ff7", "index": 9818, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n print('Predictor')\n placa = input(\n 'Enter the license of your vehicle in the following format AAA-####: '\n )\n fecha = input('Enter the date in the following format AA/MM/DD: ')\n hora = input('Enter the time in the following format 00:00: ')\n prog = PICO_PLACA(placa, fecha, hora)\n estado = prog.verificar()\n if estado == 'continue':\n estado = prog.validar()\n print('Your vehicle ' + estado)\n else:\n print(estado)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n print('Predictor')\n placa = input(\n 'Enter the license of your vehicle in the following format AAA-####: '\n )\n fecha = input('Enter the date in the following format AA/MM/DD: ')\n hora = input('Enter the time in the following format 00:00: ')\n prog = PICO_PLACA(placa, fecha, hora)\n estado = prog.verificar()\n if estado == 'continue':\n estado = prog.validar()\n print('Your vehicle ' + estado)\n else:\n print(estado)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nfrom PICO_PLACA_class import PICO_PLACA\n<mask token>\n\n\ndef main():\n print('Predictor')\n placa = input(\n 'Enter the license of your vehicle in the following format AAA-####: '\n )\n fecha = input('Enter the date in the following format AA/MM/DD: ')\n hora = input('Enter the time in the following format 00:00: ')\n prog = PICO_PLACA(placa, fecha, hora)\n estado = prog.verificar()\n if estado == 'continue':\n estado = prog.validar()\n print('Your vehicle ' + estado)\n else:\n print(estado)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 10 17:48:19 2021\r\n\r\n@author: LESLY\r\n\"\"\"\r\n\r\n\r\nfrom PICO_PLACA_class import PICO_PLACA \r\n\r\n\"\"\" Main program of \"Pico y Placa\" predictor\"\"\"\r\ndef main():\r\n \r\n print(\"Predictor\")\r\n \r\n placa = input(\"Enter the license of your vehicle in the following format AAA-####: \")\r\n \r\n fecha = input(\"Enter the date in the following format AA/MM/DD: \") \r\n \r\n hora = input(\"Enter the time in the following format 00:00: \") \r\n\r\n \r\n prog =PICO_PLACA(placa,fecha,hora)\r\n estado = prog.verificar() \r\n \r\n if estado == \"continue\":\r\n estado = prog.validar()\r\n print(\"Your vehicle \" + estado ) \r\n else: \r\n print(estado)\r\n\r\nif __name__ == '__main__':\r\n main()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
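
# A short non-interactive sketch of the same "Pico y Placa" flow as main() above; the plate,
# date and time literals are made-up examples in the formats the prompts ask for, and the
# PICO_PLACA class comes from the author's own PICO_PLACA_class module.
from PICO_PLACA_class import PICO_PLACA

prog = PICO_PLACA('ABC-1234', '21/08/10', '08:30')
estado = prog.verificar()                 # 'continue' when the inputs are well formed
if estado == 'continue':
    print('Your vehicle ' + prog.validar())
else:
    print(estado)                         # otherwise print whatever verificar() returned
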
# -*- coding: utf-8 -*- from flask import Blueprint, render_template, flash, redirect, url_for from flask_login import login_required, current_user from ..extensions import db from .forms import MyTaskForm from .models import MyTaskModel tasks = Blueprint('tasks', __name__, url_prefix='/tasks') @tasks.route('/my_tasks', methods=['GET', 'POST']) @login_required def my_tasks(): _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all() return render_template('tasks/my_tasks.html', all_tasks=_all_tasks, _active_tasks=True) @tasks.route('/view_task/<id>', methods=['GET', 'POST']) @login_required def view_task(id): _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first() if not _task: flash('Oops! Something went wrong!.', 'danger') return redirect(url_for("tasks.my_tasks")) return render_template('tasks/view_task.html', task=_task) @tasks.route('/add_task', methods=['GET', 'POST']) @login_required def add_task(): _task = MyTaskModel() _form = MyTaskForm() if _form.validate_on_submit(): _task.users_id = current_user.id _form.populate_obj(_task) db.session.add(_task) db.session.commit() db.session.refresh(_task) flash('Your task is added successfully!', 'success') return redirect(url_for("tasks.my_tasks")) return render_template('tasks/add_task.html', form=_form, _active_tasks=True) @tasks.route('/delete_task/<id>', methods=['GET', 'POST']) @login_required def delete_task(id): _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first() if not _task: flash('Oops! Something went wrong!.', 'danger') return redirect(url_for("tasks.my_tasks")) db.session.delete(_task) db.session.commit() flash('Your task is deleted successfully!', 'success') return redirect(url_for('tasks.my_tasks')) @tasks.route('/edit_task/<id>', methods=['GET', 'POST']) @login_required def edit_task(id): _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first() if not _task: flash('Oops! Something went wrong!.', 'danger') return redirect(url_for("tasks.my_tasks")) _form = MyTaskForm(obj=_task) if _form.validate_on_submit(): _task.users_id = current_user.id _form.populate_obj(_task) db.session.add(_task) db.session.commit() flash('Your task updated successfully!', 'success') return redirect(url_for("tasks.my_tasks")) return render_template('tasks/edit_task.html', form=_form, task=_task, _active_tasks=True)
normal
{ "blob_id": "7882504f08e871f2610ff633608eb3d380179041", "index": 1735, "step-1": "<mask token>\n\n\[email protected]('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\n<mask token>\n\n\[email protected]('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\[email protected]('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\[email protected]('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-2": "<mask token>\n\n\[email protected]('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\[email protected]('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/view_task.html', task=_task)\n\n\[email protected]('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\[email protected]('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\[email protected]('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-3": "<mask token>\ntasks = Blueprint('tasks', __name__, url_prefix='/tasks')\n\n\[email protected]('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\[email protected]('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/view_task.html', task=_task)\n\n\[email protected]('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\[email protected]('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\[email protected]('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-4": "from flask import Blueprint, render_template, flash, redirect, url_for\nfrom flask_login import login_required, current_user\nfrom ..extensions import db\nfrom .forms import MyTaskForm\nfrom .models import MyTaskModel\ntasks = Blueprint('tasks', __name__, url_prefix='/tasks')\n\n\[email protected]('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n return render_template('tasks/my_tasks.html', all_tasks=_all_tasks,\n _active_tasks=True)\n\n\[email protected]('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/view_task.html', task=_task)\n\n\[email protected]('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n _task = MyTaskModel()\n _form = MyTaskForm()\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/add_task.html', form=_form, _active_tasks\n =True)\n\n\[email protected]('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n db.session.delete(_task)\n db.session.commit()\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\[email protected]('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first(\n )\n if not _task:\n flash('Oops! 
Something went wrong!.', 'danger')\n return redirect(url_for('tasks.my_tasks'))\n _form = MyTaskForm(obj=_task)\n if _form.validate_on_submit():\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n db.session.add(_task)\n db.session.commit()\n flash('Your task updated successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n return render_template('tasks/edit_task.html', form=_form, task=_task,\n _active_tasks=True)\n", "step-5": "# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint, render_template, flash, redirect, url_for\nfrom flask_login import login_required, current_user\n\nfrom ..extensions import db\n\nfrom .forms import MyTaskForm\nfrom .models import MyTaskModel\n\n\ntasks = Blueprint('tasks', __name__, url_prefix='/tasks')\n\n\[email protected]('/my_tasks', methods=['GET', 'POST'])\n@login_required\ndef my_tasks():\n\n _all_tasks = MyTaskModel.query.filter_by(users_id=current_user.id).all()\n\n return render_template('tasks/my_tasks.html',\n all_tasks=_all_tasks,\n _active_tasks=True)\n\n\[email protected]('/view_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef view_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first()\n\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n return render_template('tasks/view_task.html',\n task=_task)\n\n\[email protected]('/add_task', methods=['GET', 'POST'])\n@login_required\ndef add_task():\n\n _task = MyTaskModel()\n\n _form = MyTaskForm()\n\n if _form.validate_on_submit():\n\n _task.users_id = current_user.id\n\n _form.populate_obj(_task)\n\n db.session.add(_task)\n db.session.commit()\n\n db.session.refresh(_task)\n flash('Your task is added successfully!', 'success')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n return render_template('tasks/add_task.html', form=_form, _active_tasks=True)\n\n\[email protected]('/delete_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef delete_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first()\n\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n db.session.delete(_task)\n db.session.commit()\n\n flash('Your task is deleted successfully!', 'success')\n return redirect(url_for('tasks.my_tasks'))\n\n\[email protected]('/edit_task/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_task(id):\n _task = MyTaskModel.query.filter_by(id=id, users_id=current_user.id).first()\n\n if not _task:\n flash('Oops! Something went wrong!.', 'danger')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n _form = MyTaskForm(obj=_task)\n\n if _form.validate_on_submit():\n\n _task.users_id = current_user.id\n _form.populate_obj(_task)\n\n db.session.add(_task)\n db.session.commit()\n\n flash('Your task updated successfully!', 'success')\n return redirect(url_for(\"tasks.my_tasks\"))\n\n return render_template('tasks/edit_task.html', form=_form, task=_task, _active_tasks=True)\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
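
# A sketch of how the 'tasks' blueprint above is typically wired into a Flask application
# factory. The import path is a placeholder for wherever the blueprint module lives in the
# package; only Flask's register_blueprint call itself is taken as given.
from flask import Flask

def create_app():
    app = Flask(__name__)
    # db (Flask-SQLAlchemy) and the login manager behind @login_required would be
    # initialised on `app` here, e.g. via their init_app() hooks.
    from myapp.tasks.views import tasks   # placeholder module path
    app.register_blueprint(tasks)         # routes appear under /tasks/, e.g. /tasks/my_tasks
    return app
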
from csv import writer

# newline='' keeps the csv module from writing blank rows between records on Windows.
with open("movies.csv", "w", newline='') as file:
    csv_writer = writer(file)
    csv_writer.writerow(['Name', 'Year'])
    csv_writer.writerow(['Ratchasan', 2018])
    csv_writer.writerow(['Vadachennai', 2018])
    csv_writer.writerow(['Naran', 2007])
normal
{ "blob_id": "83e231480c618d290089340c642313bbba4f1070", "index": 2035, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('movies.csv', 'w') as file:\n csv_writer = writer(file)\n csv_writer.writerow(['Name', 'Year'])\n csv_writer.writerow(['Ratchasan', 2018])\n csv_writer.writerow(['Vadachennai', 2018])\n csv_writer.writerow(['Naran', 2007])\n", "step-3": "from csv import writer\nwith open('movies.csv', 'w') as file:\n csv_writer = writer(file)\n csv_writer.writerow(['Name', 'Year'])\n csv_writer.writerow(['Ratchasan', 2018])\n csv_writer.writerow(['Vadachennai', 2018])\n csv_writer.writerow(['Naran', 2007])\n", "step-4": "from csv import writer\nwith open(\"movies.csv\",\"w\") as file:\n csv_writer=writer(file)\n csv_writer.writerow(['Name','Year'])\n csv_writer.writerow(['Ratchasan',2018])\n csv_writer.writerow(['Vadachennai',2018])\n csv_writer.writerow(['Naran',2007])\n \n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
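
# Reading movies.csv back in, as a quick check of the writer code above; standard library only.
# Passing newline='' lets the csv module manage line endings itself.
from csv import DictReader

with open('movies.csv', newline='') as file:
    for row in DictReader(file):          # the 'Name,Year' header row supplies the dict keys
        print(row['Name'], row['Year'])
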
############################################################################### # Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. # # All rights reserved. # # This file is part of the AiiDA-FLEUR package. # # # # The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.flapw.de or # # http://aiida-fleur.readthedocs.io/en/develop/ # ############################################################################### """ This module contains the FleurBaseWorkChain. FleurBaseWorkChain is a workchain that wraps the submission of the FLEUR calculation. Inheritance from the BaseRestartWorkChain allows to add scenarios to restart a calculation in an automatic way if an expected failure occurred. """ from aiida import orm from aiida.common import AttributeDict from aiida.engine import while_ from aiida.engine.processes.workchains import BaseRestartWorkChain from aiida.engine.processes.workchains.utils import process_handler, ProcessHandlerReport from aiida_fleur.tools.common_fleur_wf import optimize_calc_options from aiida_fleur.calculation.fleur import FleurCalculation from aiida_fleur.data.fleurinp import get_fleurinp_from_remote_data class FleurBaseWorkChain(BaseRestartWorkChain): """Workchain to run a FLEUR calculation with automated error handling and restarts""" _workflowversion = '0.2.1' _process_class = FleurCalculation @classmethod def define(cls, spec): super().define(spec) spec.expose_inputs(FleurCalculation, exclude=('metadata.options',)) spec.input('options', valid_type=orm.Dict, help='Optional parameters to set up computational details.') spec.input('description', valid_type=str, required=False, non_db=True, help='Calculation description.') spec.input('label', valid_type=str, required=False, non_db=True, help='Calculation label.') spec.input( 'add_comp_para', valid_type=orm.Dict, default=lambda: orm.Dict(dict={ 'only_even_MPI': False, 'forbid_single_mpi': False, 'max_queue_nodes': 20, 'max_queue_wallclock_sec': 86400 }), help='Gives additional control over computational parameters' 'only_even_MPI: set to true if you want to suppress odd number of MPI processes in parallelisation.' 'This might speedup a calculation for machines having even number of sockets per node.' 'max_queue_nodes: maximal number of nodes allowed on the remote machine. Used only to automatically solve some FLEUR failures.' 'max_queue_wallclock_sec: maximal wallclock time allowed on the remote machine. Used only to automatically solve some FLEUR failures.' 
) spec.outline( cls.setup, cls.validate_inputs, while_(cls.should_run_process)( cls.run_process, cls.inspect_process, ), cls.results, ) spec.expose_outputs(FleurCalculation) spec.exit_code(311, 'ERROR_VACUUM_SPILL_RELAX', message='FLEUR calculation failed because an atom spilled to the' 'vacuum during relaxation') spec.exit_code(313, 'ERROR_MT_RADII_RELAX', message='Overlapping MT-spheres during relaxation.') spec.exit_code(388, 'ERROR_TIME_LIMIT_NO_SOLUTION', message='Computational resources are not optimal.') spec.exit_code(389, 'ERROR_MEMORY_ISSUE_NO_SOLUTION', message='Computational resources are not optimal.') spec.exit_code(390, 'ERROR_NOT_OPTIMAL_RESOURCES', message='Computational resources are not optimal.') spec.exit_code(399, 'ERROR_SOMETHING_WENT_WRONG', message='FleurCalculation failed and FleurBaseWorkChain has no strategy ' 'to resolve this') def validate_inputs(self): """ Validate inputs that might depend on each other and cannot be validated by the spec. Also define dictionary `inputs` in the context, that will contain the inputs for the calculation that will be launched in the `run_calculation` step. """ self.ctx.inputs = AttributeDict(self.exposed_inputs(FleurCalculation)) self.ctx.max_queue_nodes = self.inputs.add_comp_para['max_queue_nodes'] self.ctx.max_queue_wallclock_sec = self.inputs.add_comp_para['max_queue_wallclock_sec'] input_options = self.inputs.options.get_dict() self.ctx.optimize_resources = input_options.pop('optimize_resources', True) self.ctx.inputs.metadata.options = input_options if 'description' in self.inputs: self.ctx.inputs.metadata.description = self.inputs.description else: self.ctx.inputs.metadata.description = '' if 'label' in self.inputs: self.ctx.inputs.metadata.label = self.inputs.label else: self.ctx.inputs.metadata.label = '' if not self.ctx.optimize_resources: self.ctx.can_be_optimised = False # set this for handlers to not change resources return resources_input = self.ctx.inputs.metadata.options['resources'] try: self.ctx.num_machines = int(resources_input['num_machines']) self.ctx.num_mpiprocs_per_machine = int(resources_input['num_mpiprocs_per_machine']) except KeyError: self.ctx.can_be_optimised = False self.report('WARNING: Computation resources were not optimised.') else: try: self.ctx.num_cores_per_mpiproc = int(resources_input['num_cores_per_mpiproc']) self.ctx.use_omp = True self.ctx.suggest_mpi_omp_ratio = self.ctx.num_mpiprocs_per_machine / self.ctx.num_cores_per_mpiproc except KeyError: self.ctx.num_cores_per_mpiproc = 1 self.ctx.use_omp = False self.ctx.suggest_mpi_omp_ratio = 1 status = self.check_kpts() if status is None: self.ctx.can_be_optimised = True else: self.report('ERROR: Not optimal computational resources.') return status def check_kpts(self): """ This routine checks if the total number of requested cpus is a factor of kpts and makes an optimisation. If suggested number of num_mpiprocs_per_machine is 60% smaller than requested, it throws an exit code and calculation stop withour submission. 
""" if 'fleurinp' in self.ctx.inputs: fleurinp = self.ctx.inputs.fleurinp else: fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.parent_folder) only_even_MPI = self.inputs.add_comp_para['only_even_MPI'] forbid_single_mpi = self.inputs.add_comp_para['forbid_single_mpi'] try: machines, mpi_tasks, omp_threads, message = optimize_calc_options(self.ctx.num_machines, self.ctx.num_mpiprocs_per_machine, self.ctx.num_cores_per_mpiproc, self.ctx.use_omp, self.ctx.suggest_mpi_omp_ratio, fleurinp, only_even_MPI=only_even_MPI, forbid_single_mpi=forbid_single_mpi) except ValueError as exc: self.report(exc) return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES self.report(message) self.ctx.inputs.metadata.options['resources']['num_machines'] = machines self.ctx.inputs.metadata.options['resources']['num_mpiprocs_per_machine'] = mpi_tasks if self.ctx.use_omp: self.ctx.inputs.metadata.options['resources']['num_cores_per_mpiproc'] = omp_threads if 'environment_variables' not in self.ctx.inputs.metadata.options: self.ctx.inputs.metadata.options['environment_variables'] = {} self.ctx.inputs.metadata.options['environment_variables']['OMP_NUM_THREADS'] = str(omp_threads) @process_handler(priority=1, exit_codes=[ FleurCalculation.exit_codes.ERROR_FLEUR_CALC_FAILED, FleurCalculation.exit_codes.ERROR_MT_RADII, FleurCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER, FleurCalculation.exit_codes.ERROR_OPENING_OUTPUTS, FleurCalculation.exit_codes.ERROR_NO_OUTXML, FleurCalculation.exit_codes.ERROR_XMLOUT_PARSING_FAILED, FleurCalculation.exit_codes.ERROR_RELAX_PARSING_FAILED, FleurCalculation.exit_codes.ERROR_MISSING_DEPENDENCY, ]) def _handle_general_error(self, calculation): """ Calculation failed for unknown reason. """ self.ctx.restart_calc = calculation self.ctx.is_finished = True self.report('Calculation failed for a reason that can not be resolved automatically') self.results() return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG) @process_handler(priority=48, exit_codes=FleurCalculation.exit_codes.ERROR_DROP_CDN) def _handle_dirac_equation(self, calculation): """ Sometimes relaxation calculation fails with Diraq problem which is usually caused by problems with reusing charge density. In this case we resubmit the calculation, dropping the input cdn. """ # try to drop remote folder and see if it helps is_fleurinp_from_relax = False if 'fleurinp' in self.ctx.inputs: if 'relax.xml' in self.ctx.inputs.fleurinp.files: is_fleurinp_from_relax = True if 'parent_folder' in self.ctx.inputs and is_fleurinp_from_relax: del self.ctx.inputs.parent_folder self.ctx.restart_calc = None self.ctx.is_finished = False self.report('Calculation seems to fail due to corrupted charge density (can happen' 'during relaxation). I drop cdn from previous step') return ProcessHandlerReport(True) self.ctx.restart_calc = calculation self.ctx.is_finished = True self.report('Can not drop charge density. If I drop the remote folder, there will be no inp.xml') self.results() return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG) @process_handler(priority=52, exit_codes=FleurCalculation.exit_codes.ERROR_VACUUM_SPILL_RELAX) def _handle_vacuum_spill_error(self, calculation): """ Calculation failed for unknown reason. """ self.ctx.restart_calc = calculation self.ctx.is_finished = True self.report('FLEUR calculation failed because an atom spilled to the vacuum during' 'relaxation. 
Can be fixed via RelaxBaseWorkChain.') self.results() return ProcessHandlerReport(True, self.exit_codes.ERROR_VACUUM_SPILL_RELAX) @process_handler(priority=51, exit_codes=FleurCalculation.exit_codes.ERROR_MT_RADII_RELAX) def _handle_mt_relax_error(self, calculation): """ Calculation failed for unknown reason. """ self.ctx.restart_calc = calculation self.ctx.is_finished = True self.report('FLEUR calculation failed due to MT overlap. Can be fixed via RelaxBaseWorkChain') self.results() return ProcessHandlerReport(True, self.exit_codes.ERROR_MT_RADII_RELAX) @process_handler(priority=50, exit_codes=FleurCalculation.exit_codes.ERROR_NOT_ENOUGH_MEMORY) def _handle_not_enough_memory(self, calculation): """ Calculation failed due to lack of memory. Probably works for JURECA only, has to be tested for other systems. """ if not self.ctx.can_be_optimised: self.ctx.restart_calc = calculation self.ctx.is_finished = True self.report('I am not allowed to optimize your settings. Consider providing at least' 'num_machines and num_mpiprocs_per_machine') self.results() return ProcessHandlerReport(True, self.exit_codes.ERROR_MEMORY_ISSUE_NO_SOLUTION) self.ctx.restart_calc = None self.ctx.is_finished = False self.report('Calculation failed due to lack of memory, I resubmit it with twice larger' ' amount of computational nodes and smaller MPI/OMP ratio') # increase number of nodes propose_nodes = self.ctx.num_machines * 2 if propose_nodes > self.ctx.max_queue_nodes: propose_nodes = self.ctx.max_queue_nodes self.ctx.num_machines = propose_nodes self.ctx.suggest_mpi_omp_ratio = self.ctx.suggest_mpi_omp_ratio / 2 status = self.check_kpts() if status is not None: self.ctx.is_finished = True self.results() return ProcessHandlerReport(True, self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES) if 'settings' not in self.ctx.inputs: settings = {} else: settings = self.ctx.inputs.settings.get_dict() settings.setdefault('remove_from_remotecopy_list', []) if 'mixing_history*' not in settings['remove_from_remotecopy_list']: settings['remove_from_remotecopy_list'].append('mixing_history*') self.ctx.inputs.settings = orm.Dict(dict=settings) #check if the cdn.hdf can be reused #Out of memory can also occur after a couple of iterations if the mixing_history gets too large remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder') if _is_remote_reusable(self.ctx.inputs, calculation): if 'fleurinp' in self.ctx.inputs: del self.ctx.inputs.fleurinp self.ctx.inputs.parent_folder = remote return ProcessHandlerReport(True) @process_handler(priority=47, exit_codes=FleurCalculation.exit_codes.ERROR_TIME_LIMIT) def _handle_time_limits(self, calculation): """ If calculation fails due to time limits, we simply resubmit it. 
""" from aiida.common.exceptions import NotExistent # if previous calculation failed for the same reason, do not restart try: prev_calculation_remote = calculation.base.links.get_incoming().get_node_by_label('parent_folder') prev_calculation_status = prev_calculation_remote.creator.exit_status if prev_calculation_status in FleurCalculation.get_exit_statuses(['ERROR_TIME_LIMIT']): self.ctx.is_finished = True self.results() return ProcessHandlerReport(True) except NotExistent: pass self.report('FleurCalculation failed due to time limits, I restart it from where it ended') # increase wallclock time propose_wallclock = self.ctx.inputs.metadata.options['max_wallclock_seconds'] * 2 if propose_wallclock > self.ctx.max_queue_wallclock_sec: propose_wallclock = self.ctx.max_queue_wallclock_sec self.ctx.inputs.metadata.options['max_wallclock_seconds'] = propose_wallclock # increase number of nodes propose_nodes = self.ctx.num_machines * 2 if propose_nodes > self.ctx.max_queue_nodes: propose_nodes = self.ctx.max_queue_nodes self.ctx.num_machines = propose_nodes remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder') # resubmit providing inp.xml and cdn from the remote folder self.ctx.is_finished = False if _is_remote_reusable(self.ctx.inputs, calculation): if 'fleurinp' in self.ctx.inputs: del self.ctx.inputs.fleurinp self.ctx.inputs.parent_folder = remote return ProcessHandlerReport(True) def _is_remote_reusable(inputs, calculation): """ Check whether the remote folder of the given calculation can be resubmitted """ can_use_remote = False #If no charge density file is available to restart from the calculation will except #with a not nice error message. So we can only reuse the charge density if these files are available retrieved_filenames = calculation.base.links.get_outgoing().get_node_by_label('retrieved').list_object_names() if any(file in retrieved_filenames for file in ( 'cdn_last.hdf', 'cdn1', )): can_use_remote = True if 'fleurinp' in inputs: modes = inputs.fleurinp.get_fleur_modes() if modes['force_theorem'] or modes['dos'] or modes['band']: # in modes listed above it makes no sense copying cdn.hdf can_use_remote = False # without fleurinp it is harder to extract modes in this case # - simply try to reuse cdn.hdf and hope it works return can_use_remote
normal
{ "blob_id": "1d4a51cfbd5df9ac9074c816a140309e04fff021", "index": 4159, "step-1": "<mask token>\n\n\nclass FleurBaseWorkChain(BaseRestartWorkChain):\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.expose_inputs(FleurCalculation, exclude=('metadata.options',))\n spec.input('options', valid_type=orm.Dict, help=\n 'Optional parameters to set up computational details.')\n spec.input('description', valid_type=str, required=False, non_db=\n True, help='Calculation description.')\n spec.input('label', valid_type=str, required=False, non_db=True,\n help='Calculation label.')\n spec.input('add_comp_para', valid_type=orm.Dict, default=lambda :\n orm.Dict(dict={'only_even_MPI': False, 'forbid_single_mpi': \n False, 'max_queue_nodes': 20, 'max_queue_wallclock_sec': 86400}\n ), help=\n 'Gives additional control over computational parametersonly_even_MPI: set to true if you want to suppress odd number of MPI processes in parallelisation.This might speedup a calculation for machines having even number of sockets per node.max_queue_nodes: maximal number of nodes allowed on the remote machine. Used only to automatically solve some FLEUR failures.max_queue_wallclock_sec: maximal wallclock time allowed on the remote machine. Used only to automatically solve some FLEUR failures.'\n )\n spec.outline(cls.setup, cls.validate_inputs, while_(cls.\n should_run_process)(cls.run_process, cls.inspect_process), cls.\n results)\n spec.expose_outputs(FleurCalculation)\n spec.exit_code(311, 'ERROR_VACUUM_SPILL_RELAX', message=\n 'FLEUR calculation failed because an atom spilled to thevacuum during relaxation'\n )\n spec.exit_code(313, 'ERROR_MT_RADII_RELAX', message=\n 'Overlapping MT-spheres during relaxation.')\n spec.exit_code(388, 'ERROR_TIME_LIMIT_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(389, 'ERROR_MEMORY_ISSUE_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(390, 'ERROR_NOT_OPTIMAL_RESOURCES', message=\n 'Computational resources are not optimal.')\n spec.exit_code(399, 'ERROR_SOMETHING_WENT_WRONG', message=\n 'FleurCalculation failed and FleurBaseWorkChain has no strategy to resolve this'\n )\n <mask token>\n <mask token>\n <mask token>\n\n @process_handler(priority=48, exit_codes=FleurCalculation.exit_codes.\n ERROR_DROP_CDN)\n def _handle_dirac_equation(self, calculation):\n \"\"\"\n Sometimes relaxation calculation fails with Diraq problem which is usually caused by\n problems with reusing charge density. In this case we resubmit the calculation, dropping the input cdn.\n \"\"\"\n is_fleurinp_from_relax = False\n if 'fleurinp' in self.ctx.inputs:\n if 'relax.xml' in self.ctx.inputs.fleurinp.files:\n is_fleurinp_from_relax = True\n if 'parent_folder' in self.ctx.inputs and is_fleurinp_from_relax:\n del self.ctx.inputs.parent_folder\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report(\n 'Calculation seems to fail due to corrupted charge density (can happenduring relaxation). I drop cdn from previous step'\n )\n return ProcessHandlerReport(True)\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'Can not drop charge density. 
If I drop the remote folder, there will be no inp.xml'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_SOMETHING_WENT_WRONG)\n <mask token>\n <mask token>\n <mask token>\n\n @process_handler(priority=47, exit_codes=FleurCalculation.exit_codes.\n ERROR_TIME_LIMIT)\n def _handle_time_limits(self, calculation):\n \"\"\"\n If calculation fails due to time limits, we simply resubmit it.\n \"\"\"\n from aiida.common.exceptions import NotExistent\n try:\n prev_calculation_remote = calculation.base.links.get_incoming(\n ).get_node_by_label('parent_folder')\n prev_calculation_status = (prev_calculation_remote.creator.\n exit_status)\n if prev_calculation_status in FleurCalculation.get_exit_statuses([\n 'ERROR_TIME_LIMIT']):\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True)\n except NotExistent:\n pass\n self.report(\n 'FleurCalculation failed due to time limits, I restart it from where it ended'\n )\n propose_wallclock = self.ctx.inputs.metadata.options[\n 'max_wallclock_seconds'] * 2\n if propose_wallclock > self.ctx.max_queue_wallclock_sec:\n propose_wallclock = self.ctx.max_queue_wallclock_sec\n self.ctx.inputs.metadata.options['max_wallclock_seconds'\n ] = propose_wallclock\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n remote = calculation.base.links.get_outgoing().get_node_by_label(\n 'remote_folder')\n self.ctx.is_finished = False\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n return ProcessHandlerReport(True)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass FleurBaseWorkChain(BaseRestartWorkChain):\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.expose_inputs(FleurCalculation, exclude=('metadata.options',))\n spec.input('options', valid_type=orm.Dict, help=\n 'Optional parameters to set up computational details.')\n spec.input('description', valid_type=str, required=False, non_db=\n True, help='Calculation description.')\n spec.input('label', valid_type=str, required=False, non_db=True,\n help='Calculation label.')\n spec.input('add_comp_para', valid_type=orm.Dict, default=lambda :\n orm.Dict(dict={'only_even_MPI': False, 'forbid_single_mpi': \n False, 'max_queue_nodes': 20, 'max_queue_wallclock_sec': 86400}\n ), help=\n 'Gives additional control over computational parametersonly_even_MPI: set to true if you want to suppress odd number of MPI processes in parallelisation.This might speedup a calculation for machines having even number of sockets per node.max_queue_nodes: maximal number of nodes allowed on the remote machine. Used only to automatically solve some FLEUR failures.max_queue_wallclock_sec: maximal wallclock time allowed on the remote machine. 
Used only to automatically solve some FLEUR failures.'\n )\n spec.outline(cls.setup, cls.validate_inputs, while_(cls.\n should_run_process)(cls.run_process, cls.inspect_process), cls.\n results)\n spec.expose_outputs(FleurCalculation)\n spec.exit_code(311, 'ERROR_VACUUM_SPILL_RELAX', message=\n 'FLEUR calculation failed because an atom spilled to thevacuum during relaxation'\n )\n spec.exit_code(313, 'ERROR_MT_RADII_RELAX', message=\n 'Overlapping MT-spheres during relaxation.')\n spec.exit_code(388, 'ERROR_TIME_LIMIT_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(389, 'ERROR_MEMORY_ISSUE_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(390, 'ERROR_NOT_OPTIMAL_RESOURCES', message=\n 'Computational resources are not optimal.')\n spec.exit_code(399, 'ERROR_SOMETHING_WENT_WRONG', message=\n 'FleurCalculation failed and FleurBaseWorkChain has no strategy to resolve this'\n )\n <mask token>\n\n def check_kpts(self):\n \"\"\"\n This routine checks if the total number of requested cpus\n is a factor of kpts and makes an optimisation.\n\n If suggested number of num_mpiprocs_per_machine is 60% smaller than\n requested, it throws an exit code and calculation stop withour submission.\n \"\"\"\n if 'fleurinp' in self.ctx.inputs:\n fleurinp = self.ctx.inputs.fleurinp\n else:\n fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.\n parent_folder)\n only_even_MPI = self.inputs.add_comp_para['only_even_MPI']\n forbid_single_mpi = self.inputs.add_comp_para['forbid_single_mpi']\n try:\n machines, mpi_tasks, omp_threads, message = optimize_calc_options(\n self.ctx.num_machines, self.ctx.num_mpiprocs_per_machine,\n self.ctx.num_cores_per_mpiproc, self.ctx.use_omp, self.ctx.\n suggest_mpi_omp_ratio, fleurinp, only_even_MPI=\n only_even_MPI, forbid_single_mpi=forbid_single_mpi)\n except ValueError as exc:\n self.report(exc)\n return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES\n self.report(message)\n self.ctx.inputs.metadata.options['resources']['num_machines'\n ] = machines\n self.ctx.inputs.metadata.options['resources'][\n 'num_mpiprocs_per_machine'] = mpi_tasks\n if self.ctx.use_omp:\n self.ctx.inputs.metadata.options['resources'][\n 'num_cores_per_mpiproc'] = omp_threads\n if 'environment_variables' not in self.ctx.inputs.metadata.options:\n self.ctx.inputs.metadata.options['environment_variables'] = {}\n self.ctx.inputs.metadata.options['environment_variables'][\n 'OMP_NUM_THREADS'] = str(omp_threads)\n\n @process_handler(priority=1, exit_codes=[FleurCalculation.exit_codes.\n ERROR_FLEUR_CALC_FAILED, FleurCalculation.exit_codes.ERROR_MT_RADII,\n FleurCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER,\n FleurCalculation.exit_codes.ERROR_OPENING_OUTPUTS, FleurCalculation\n .exit_codes.ERROR_NO_OUTXML, FleurCalculation.exit_codes.\n ERROR_XMLOUT_PARSING_FAILED, FleurCalculation.exit_codes.\n ERROR_RELAX_PARSING_FAILED, FleurCalculation.exit_codes.\n ERROR_MISSING_DEPENDENCY])\n def _handle_general_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'Calculation failed for a reason that can not be resolved automatically'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_SOMETHING_WENT_WRONG)\n\n @process_handler(priority=48, exit_codes=FleurCalculation.exit_codes.\n ERROR_DROP_CDN)\n def _handle_dirac_equation(self, calculation):\n \"\"\"\n Sometimes relaxation calculation fails with 
Diraq problem which is usually caused by\n problems with reusing charge density. In this case we resubmit the calculation, dropping the input cdn.\n \"\"\"\n is_fleurinp_from_relax = False\n if 'fleurinp' in self.ctx.inputs:\n if 'relax.xml' in self.ctx.inputs.fleurinp.files:\n is_fleurinp_from_relax = True\n if 'parent_folder' in self.ctx.inputs and is_fleurinp_from_relax:\n del self.ctx.inputs.parent_folder\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report(\n 'Calculation seems to fail due to corrupted charge density (can happenduring relaxation). I drop cdn from previous step'\n )\n return ProcessHandlerReport(True)\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'Can not drop charge density. If I drop the remote folder, there will be no inp.xml'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_SOMETHING_WENT_WRONG)\n <mask token>\n\n @process_handler(priority=51, exit_codes=FleurCalculation.exit_codes.\n ERROR_MT_RADII_RELAX)\n def _handle_mt_relax_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'FLEUR calculation failed due to MT overlap. Can be fixed via RelaxBaseWorkChain'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MT_RADII_RELAX)\n <mask token>\n\n @process_handler(priority=47, exit_codes=FleurCalculation.exit_codes.\n ERROR_TIME_LIMIT)\n def _handle_time_limits(self, calculation):\n \"\"\"\n If calculation fails due to time limits, we simply resubmit it.\n \"\"\"\n from aiida.common.exceptions import NotExistent\n try:\n prev_calculation_remote = calculation.base.links.get_incoming(\n ).get_node_by_label('parent_folder')\n prev_calculation_status = (prev_calculation_remote.creator.\n exit_status)\n if prev_calculation_status in FleurCalculation.get_exit_statuses([\n 'ERROR_TIME_LIMIT']):\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True)\n except NotExistent:\n pass\n self.report(\n 'FleurCalculation failed due to time limits, I restart it from where it ended'\n )\n propose_wallclock = self.ctx.inputs.metadata.options[\n 'max_wallclock_seconds'] * 2\n if propose_wallclock > self.ctx.max_queue_wallclock_sec:\n propose_wallclock = self.ctx.max_queue_wallclock_sec\n self.ctx.inputs.metadata.options['max_wallclock_seconds'\n ] = propose_wallclock\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n remote = calculation.base.links.get_outgoing().get_node_by_label(\n 'remote_folder')\n self.ctx.is_finished = False\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n return ProcessHandlerReport(True)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass FleurBaseWorkChain(BaseRestartWorkChain):\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.expose_inputs(FleurCalculation, exclude=('metadata.options',))\n spec.input('options', valid_type=orm.Dict, help=\n 'Optional parameters to set up computational details.')\n spec.input('description', valid_type=str, required=False, non_db=\n True, help='Calculation description.')\n spec.input('label', valid_type=str, required=False, non_db=True,\n help='Calculation 
label.')\n spec.input('add_comp_para', valid_type=orm.Dict, default=lambda :\n orm.Dict(dict={'only_even_MPI': False, 'forbid_single_mpi': \n False, 'max_queue_nodes': 20, 'max_queue_wallclock_sec': 86400}\n ), help=\n 'Gives additional control over computational parametersonly_even_MPI: set to true if you want to suppress odd number of MPI processes in parallelisation.This might speedup a calculation for machines having even number of sockets per node.max_queue_nodes: maximal number of nodes allowed on the remote machine. Used only to automatically solve some FLEUR failures.max_queue_wallclock_sec: maximal wallclock time allowed on the remote machine. Used only to automatically solve some FLEUR failures.'\n )\n spec.outline(cls.setup, cls.validate_inputs, while_(cls.\n should_run_process)(cls.run_process, cls.inspect_process), cls.\n results)\n spec.expose_outputs(FleurCalculation)\n spec.exit_code(311, 'ERROR_VACUUM_SPILL_RELAX', message=\n 'FLEUR calculation failed because an atom spilled to thevacuum during relaxation'\n )\n spec.exit_code(313, 'ERROR_MT_RADII_RELAX', message=\n 'Overlapping MT-spheres during relaxation.')\n spec.exit_code(388, 'ERROR_TIME_LIMIT_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(389, 'ERROR_MEMORY_ISSUE_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(390, 'ERROR_NOT_OPTIMAL_RESOURCES', message=\n 'Computational resources are not optimal.')\n spec.exit_code(399, 'ERROR_SOMETHING_WENT_WRONG', message=\n 'FleurCalculation failed and FleurBaseWorkChain has no strategy to resolve this'\n )\n\n def validate_inputs(self):\n \"\"\"\n Validate inputs that might depend on each other and cannot be validated by the spec.\n Also define dictionary `inputs` in the context, that will contain the inputs for the\n calculation that will be launched in the `run_calculation` step.\n \"\"\"\n self.ctx.inputs = AttributeDict(self.exposed_inputs(FleurCalculation))\n self.ctx.max_queue_nodes = self.inputs.add_comp_para['max_queue_nodes']\n self.ctx.max_queue_wallclock_sec = self.inputs.add_comp_para[\n 'max_queue_wallclock_sec']\n input_options = self.inputs.options.get_dict()\n self.ctx.optimize_resources = input_options.pop('optimize_resources',\n True)\n self.ctx.inputs.metadata.options = input_options\n if 'description' in self.inputs:\n self.ctx.inputs.metadata.description = self.inputs.description\n else:\n self.ctx.inputs.metadata.description = ''\n if 'label' in self.inputs:\n self.ctx.inputs.metadata.label = self.inputs.label\n else:\n self.ctx.inputs.metadata.label = ''\n if not self.ctx.optimize_resources:\n self.ctx.can_be_optimised = False\n return\n resources_input = self.ctx.inputs.metadata.options['resources']\n try:\n self.ctx.num_machines = int(resources_input['num_machines'])\n self.ctx.num_mpiprocs_per_machine = int(resources_input[\n 'num_mpiprocs_per_machine'])\n except KeyError:\n self.ctx.can_be_optimised = False\n self.report('WARNING: Computation resources were not optimised.')\n else:\n try:\n self.ctx.num_cores_per_mpiproc = int(resources_input[\n 'num_cores_per_mpiproc'])\n self.ctx.use_omp = True\n self.ctx.suggest_mpi_omp_ratio = (self.ctx.\n num_mpiprocs_per_machine / self.ctx.num_cores_per_mpiproc)\n except KeyError:\n self.ctx.num_cores_per_mpiproc = 1\n self.ctx.use_omp = False\n self.ctx.suggest_mpi_omp_ratio = 1\n status = self.check_kpts()\n if status is None:\n self.ctx.can_be_optimised = True\n else:\n self.report('ERROR: Not optimal computational resources.')\n 
return status\n\n def check_kpts(self):\n \"\"\"\n This routine checks if the total number of requested cpus\n is a factor of kpts and makes an optimisation.\n\n If suggested number of num_mpiprocs_per_machine is 60% smaller than\n requested, it throws an exit code and calculation stop withour submission.\n \"\"\"\n if 'fleurinp' in self.ctx.inputs:\n fleurinp = self.ctx.inputs.fleurinp\n else:\n fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.\n parent_folder)\n only_even_MPI = self.inputs.add_comp_para['only_even_MPI']\n forbid_single_mpi = self.inputs.add_comp_para['forbid_single_mpi']\n try:\n machines, mpi_tasks, omp_threads, message = optimize_calc_options(\n self.ctx.num_machines, self.ctx.num_mpiprocs_per_machine,\n self.ctx.num_cores_per_mpiproc, self.ctx.use_omp, self.ctx.\n suggest_mpi_omp_ratio, fleurinp, only_even_MPI=\n only_even_MPI, forbid_single_mpi=forbid_single_mpi)\n except ValueError as exc:\n self.report(exc)\n return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES\n self.report(message)\n self.ctx.inputs.metadata.options['resources']['num_machines'\n ] = machines\n self.ctx.inputs.metadata.options['resources'][\n 'num_mpiprocs_per_machine'] = mpi_tasks\n if self.ctx.use_omp:\n self.ctx.inputs.metadata.options['resources'][\n 'num_cores_per_mpiproc'] = omp_threads\n if 'environment_variables' not in self.ctx.inputs.metadata.options:\n self.ctx.inputs.metadata.options['environment_variables'] = {}\n self.ctx.inputs.metadata.options['environment_variables'][\n 'OMP_NUM_THREADS'] = str(omp_threads)\n\n @process_handler(priority=1, exit_codes=[FleurCalculation.exit_codes.\n ERROR_FLEUR_CALC_FAILED, FleurCalculation.exit_codes.ERROR_MT_RADII,\n FleurCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER,\n FleurCalculation.exit_codes.ERROR_OPENING_OUTPUTS, FleurCalculation\n .exit_codes.ERROR_NO_OUTXML, FleurCalculation.exit_codes.\n ERROR_XMLOUT_PARSING_FAILED, FleurCalculation.exit_codes.\n ERROR_RELAX_PARSING_FAILED, FleurCalculation.exit_codes.\n ERROR_MISSING_DEPENDENCY])\n def _handle_general_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'Calculation failed for a reason that can not be resolved automatically'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_SOMETHING_WENT_WRONG)\n\n @process_handler(priority=48, exit_codes=FleurCalculation.exit_codes.\n ERROR_DROP_CDN)\n def _handle_dirac_equation(self, calculation):\n \"\"\"\n Sometimes relaxation calculation fails with Diraq problem which is usually caused by\n problems with reusing charge density. In this case we resubmit the calculation, dropping the input cdn.\n \"\"\"\n is_fleurinp_from_relax = False\n if 'fleurinp' in self.ctx.inputs:\n if 'relax.xml' in self.ctx.inputs.fleurinp.files:\n is_fleurinp_from_relax = True\n if 'parent_folder' in self.ctx.inputs and is_fleurinp_from_relax:\n del self.ctx.inputs.parent_folder\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report(\n 'Calculation seems to fail due to corrupted charge density (can happenduring relaxation). I drop cdn from previous step'\n )\n return ProcessHandlerReport(True)\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'Can not drop charge density. 
If I drop the remote folder, there will be no inp.xml'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_SOMETHING_WENT_WRONG)\n\n @process_handler(priority=52, exit_codes=FleurCalculation.exit_codes.\n ERROR_VACUUM_SPILL_RELAX)\n def _handle_vacuum_spill_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'FLEUR calculation failed because an atom spilled to the vacuum duringrelaxation. Can be fixed via RelaxBaseWorkChain.'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_VACUUM_SPILL_RELAX)\n\n @process_handler(priority=51, exit_codes=FleurCalculation.exit_codes.\n ERROR_MT_RADII_RELAX)\n def _handle_mt_relax_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'FLEUR calculation failed due to MT overlap. Can be fixed via RelaxBaseWorkChain'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MT_RADII_RELAX)\n <mask token>\n\n @process_handler(priority=47, exit_codes=FleurCalculation.exit_codes.\n ERROR_TIME_LIMIT)\n def _handle_time_limits(self, calculation):\n \"\"\"\n If calculation fails due to time limits, we simply resubmit it.\n \"\"\"\n from aiida.common.exceptions import NotExistent\n try:\n prev_calculation_remote = calculation.base.links.get_incoming(\n ).get_node_by_label('parent_folder')\n prev_calculation_status = (prev_calculation_remote.creator.\n exit_status)\n if prev_calculation_status in FleurCalculation.get_exit_statuses([\n 'ERROR_TIME_LIMIT']):\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True)\n except NotExistent:\n pass\n self.report(\n 'FleurCalculation failed due to time limits, I restart it from where it ended'\n )\n propose_wallclock = self.ctx.inputs.metadata.options[\n 'max_wallclock_seconds'] * 2\n if propose_wallclock > self.ctx.max_queue_wallclock_sec:\n propose_wallclock = self.ctx.max_queue_wallclock_sec\n self.ctx.inputs.metadata.options['max_wallclock_seconds'\n ] = propose_wallclock\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n remote = calculation.base.links.get_outgoing().get_node_by_label(\n 'remote_folder')\n self.ctx.is_finished = False\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n return ProcessHandlerReport(True)\n\n\n<mask token>\n", "step-4": "<mask token>\nfrom aiida import orm\nfrom aiida.common import AttributeDict\nfrom aiida.engine import while_\nfrom aiida.engine.processes.workchains import BaseRestartWorkChain\nfrom aiida.engine.processes.workchains.utils import process_handler, ProcessHandlerReport\nfrom aiida_fleur.tools.common_fleur_wf import optimize_calc_options\nfrom aiida_fleur.calculation.fleur import FleurCalculation\nfrom aiida_fleur.data.fleurinp import get_fleurinp_from_remote_data\n\n\nclass FleurBaseWorkChain(BaseRestartWorkChain):\n \"\"\"Workchain to run a FLEUR calculation with automated error handling and restarts\"\"\"\n _workflowversion = '0.2.1'\n _process_class = FleurCalculation\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.expose_inputs(FleurCalculation, exclude=('metadata.options',))\n 
spec.input('options', valid_type=orm.Dict, help=\n 'Optional parameters to set up computational details.')\n spec.input('description', valid_type=str, required=False, non_db=\n True, help='Calculation description.')\n spec.input('label', valid_type=str, required=False, non_db=True,\n help='Calculation label.')\n spec.input('add_comp_para', valid_type=orm.Dict, default=lambda :\n orm.Dict(dict={'only_even_MPI': False, 'forbid_single_mpi': \n False, 'max_queue_nodes': 20, 'max_queue_wallclock_sec': 86400}\n ), help=\n 'Gives additional control over computational parametersonly_even_MPI: set to true if you want to suppress odd number of MPI processes in parallelisation.This might speedup a calculation for machines having even number of sockets per node.max_queue_nodes: maximal number of nodes allowed on the remote machine. Used only to automatically solve some FLEUR failures.max_queue_wallclock_sec: maximal wallclock time allowed on the remote machine. Used only to automatically solve some FLEUR failures.'\n )\n spec.outline(cls.setup, cls.validate_inputs, while_(cls.\n should_run_process)(cls.run_process, cls.inspect_process), cls.\n results)\n spec.expose_outputs(FleurCalculation)\n spec.exit_code(311, 'ERROR_VACUUM_SPILL_RELAX', message=\n 'FLEUR calculation failed because an atom spilled to thevacuum during relaxation'\n )\n spec.exit_code(313, 'ERROR_MT_RADII_RELAX', message=\n 'Overlapping MT-spheres during relaxation.')\n spec.exit_code(388, 'ERROR_TIME_LIMIT_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(389, 'ERROR_MEMORY_ISSUE_NO_SOLUTION', message=\n 'Computational resources are not optimal.')\n spec.exit_code(390, 'ERROR_NOT_OPTIMAL_RESOURCES', message=\n 'Computational resources are not optimal.')\n spec.exit_code(399, 'ERROR_SOMETHING_WENT_WRONG', message=\n 'FleurCalculation failed and FleurBaseWorkChain has no strategy to resolve this'\n )\n\n def validate_inputs(self):\n \"\"\"\n Validate inputs that might depend on each other and cannot be validated by the spec.\n Also define dictionary `inputs` in the context, that will contain the inputs for the\n calculation that will be launched in the `run_calculation` step.\n \"\"\"\n self.ctx.inputs = AttributeDict(self.exposed_inputs(FleurCalculation))\n self.ctx.max_queue_nodes = self.inputs.add_comp_para['max_queue_nodes']\n self.ctx.max_queue_wallclock_sec = self.inputs.add_comp_para[\n 'max_queue_wallclock_sec']\n input_options = self.inputs.options.get_dict()\n self.ctx.optimize_resources = input_options.pop('optimize_resources',\n True)\n self.ctx.inputs.metadata.options = input_options\n if 'description' in self.inputs:\n self.ctx.inputs.metadata.description = self.inputs.description\n else:\n self.ctx.inputs.metadata.description = ''\n if 'label' in self.inputs:\n self.ctx.inputs.metadata.label = self.inputs.label\n else:\n self.ctx.inputs.metadata.label = ''\n if not self.ctx.optimize_resources:\n self.ctx.can_be_optimised = False\n return\n resources_input = self.ctx.inputs.metadata.options['resources']\n try:\n self.ctx.num_machines = int(resources_input['num_machines'])\n self.ctx.num_mpiprocs_per_machine = int(resources_input[\n 'num_mpiprocs_per_machine'])\n except KeyError:\n self.ctx.can_be_optimised = False\n self.report('WARNING: Computation resources were not optimised.')\n else:\n try:\n self.ctx.num_cores_per_mpiproc = int(resources_input[\n 'num_cores_per_mpiproc'])\n self.ctx.use_omp = True\n self.ctx.suggest_mpi_omp_ratio = (self.ctx.\n num_mpiprocs_per_machine / 
self.ctx.num_cores_per_mpiproc)\n except KeyError:\n self.ctx.num_cores_per_mpiproc = 1\n self.ctx.use_omp = False\n self.ctx.suggest_mpi_omp_ratio = 1\n status = self.check_kpts()\n if status is None:\n self.ctx.can_be_optimised = True\n else:\n self.report('ERROR: Not optimal computational resources.')\n return status\n\n def check_kpts(self):\n \"\"\"\n This routine checks if the total number of requested cpus\n is a factor of kpts and makes an optimisation.\n\n If suggested number of num_mpiprocs_per_machine is 60% smaller than\n requested, it throws an exit code and calculation stop withour submission.\n \"\"\"\n if 'fleurinp' in self.ctx.inputs:\n fleurinp = self.ctx.inputs.fleurinp\n else:\n fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.\n parent_folder)\n only_even_MPI = self.inputs.add_comp_para['only_even_MPI']\n forbid_single_mpi = self.inputs.add_comp_para['forbid_single_mpi']\n try:\n machines, mpi_tasks, omp_threads, message = optimize_calc_options(\n self.ctx.num_machines, self.ctx.num_mpiprocs_per_machine,\n self.ctx.num_cores_per_mpiproc, self.ctx.use_omp, self.ctx.\n suggest_mpi_omp_ratio, fleurinp, only_even_MPI=\n only_even_MPI, forbid_single_mpi=forbid_single_mpi)\n except ValueError as exc:\n self.report(exc)\n return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES\n self.report(message)\n self.ctx.inputs.metadata.options['resources']['num_machines'\n ] = machines\n self.ctx.inputs.metadata.options['resources'][\n 'num_mpiprocs_per_machine'] = mpi_tasks\n if self.ctx.use_omp:\n self.ctx.inputs.metadata.options['resources'][\n 'num_cores_per_mpiproc'] = omp_threads\n if 'environment_variables' not in self.ctx.inputs.metadata.options:\n self.ctx.inputs.metadata.options['environment_variables'] = {}\n self.ctx.inputs.metadata.options['environment_variables'][\n 'OMP_NUM_THREADS'] = str(omp_threads)\n\n @process_handler(priority=1, exit_codes=[FleurCalculation.exit_codes.\n ERROR_FLEUR_CALC_FAILED, FleurCalculation.exit_codes.ERROR_MT_RADII,\n FleurCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER,\n FleurCalculation.exit_codes.ERROR_OPENING_OUTPUTS, FleurCalculation\n .exit_codes.ERROR_NO_OUTXML, FleurCalculation.exit_codes.\n ERROR_XMLOUT_PARSING_FAILED, FleurCalculation.exit_codes.\n ERROR_RELAX_PARSING_FAILED, FleurCalculation.exit_codes.\n ERROR_MISSING_DEPENDENCY])\n def _handle_general_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'Calculation failed for a reason that can not be resolved automatically'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_SOMETHING_WENT_WRONG)\n\n @process_handler(priority=48, exit_codes=FleurCalculation.exit_codes.\n ERROR_DROP_CDN)\n def _handle_dirac_equation(self, calculation):\n \"\"\"\n Sometimes relaxation calculation fails with Diraq problem which is usually caused by\n problems with reusing charge density. In this case we resubmit the calculation, dropping the input cdn.\n \"\"\"\n is_fleurinp_from_relax = False\n if 'fleurinp' in self.ctx.inputs:\n if 'relax.xml' in self.ctx.inputs.fleurinp.files:\n is_fleurinp_from_relax = True\n if 'parent_folder' in self.ctx.inputs and is_fleurinp_from_relax:\n del self.ctx.inputs.parent_folder\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report(\n 'Calculation seems to fail due to corrupted charge density (can happenduring relaxation). 
I drop cdn from previous step'\n )\n return ProcessHandlerReport(True)\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'Can not drop charge density. If I drop the remote folder, there will be no inp.xml'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_SOMETHING_WENT_WRONG)\n\n @process_handler(priority=52, exit_codes=FleurCalculation.exit_codes.\n ERROR_VACUUM_SPILL_RELAX)\n def _handle_vacuum_spill_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'FLEUR calculation failed because an atom spilled to the vacuum duringrelaxation. Can be fixed via RelaxBaseWorkChain.'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_VACUUM_SPILL_RELAX)\n\n @process_handler(priority=51, exit_codes=FleurCalculation.exit_codes.\n ERROR_MT_RADII_RELAX)\n def _handle_mt_relax_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'FLEUR calculation failed due to MT overlap. Can be fixed via RelaxBaseWorkChain'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MT_RADII_RELAX)\n\n @process_handler(priority=50, exit_codes=FleurCalculation.exit_codes.\n ERROR_NOT_ENOUGH_MEMORY)\n def _handle_not_enough_memory(self, calculation):\n \"\"\"\n Calculation failed due to lack of memory.\n Probably works for JURECA only, has to be tested for other systems.\n \"\"\"\n if not self.ctx.can_be_optimised:\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report(\n 'I am not allowed to optimize your settings. 
Consider providing at leastnum_machines and num_mpiprocs_per_machine'\n )\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_MEMORY_ISSUE_NO_SOLUTION)\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report(\n 'Calculation failed due to lack of memory, I resubmit it with twice larger amount of computational nodes and smaller MPI/OMP ratio'\n )\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n self.ctx.suggest_mpi_omp_ratio = self.ctx.suggest_mpi_omp_ratio / 2\n status = self.check_kpts()\n if status is not None:\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.\n ERROR_NOT_OPTIMAL_RESOURCES)\n if 'settings' not in self.ctx.inputs:\n settings = {}\n else:\n settings = self.ctx.inputs.settings.get_dict()\n settings.setdefault('remove_from_remotecopy_list', [])\n if 'mixing_history*' not in settings['remove_from_remotecopy_list']:\n settings['remove_from_remotecopy_list'].append('mixing_history*')\n self.ctx.inputs.settings = orm.Dict(dict=settings)\n remote = calculation.base.links.get_outgoing().get_node_by_label(\n 'remote_folder')\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n return ProcessHandlerReport(True)\n\n @process_handler(priority=47, exit_codes=FleurCalculation.exit_codes.\n ERROR_TIME_LIMIT)\n def _handle_time_limits(self, calculation):\n \"\"\"\n If calculation fails due to time limits, we simply resubmit it.\n \"\"\"\n from aiida.common.exceptions import NotExistent\n try:\n prev_calculation_remote = calculation.base.links.get_incoming(\n ).get_node_by_label('parent_folder')\n prev_calculation_status = (prev_calculation_remote.creator.\n exit_status)\n if prev_calculation_status in FleurCalculation.get_exit_statuses([\n 'ERROR_TIME_LIMIT']):\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True)\n except NotExistent:\n pass\n self.report(\n 'FleurCalculation failed due to time limits, I restart it from where it ended'\n )\n propose_wallclock = self.ctx.inputs.metadata.options[\n 'max_wallclock_seconds'] * 2\n if propose_wallclock > self.ctx.max_queue_wallclock_sec:\n propose_wallclock = self.ctx.max_queue_wallclock_sec\n self.ctx.inputs.metadata.options['max_wallclock_seconds'\n ] = propose_wallclock\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n remote = calculation.base.links.get_outgoing().get_node_by_label(\n 'remote_folder')\n self.ctx.is_finished = False\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n return ProcessHandlerReport(True)\n\n\ndef _is_remote_reusable(inputs, calculation):\n \"\"\"\n Check whether the remote folder of the given calculation\n can be resubmitted\n \"\"\"\n can_use_remote = False\n retrieved_filenames = calculation.base.links.get_outgoing(\n ).get_node_by_label('retrieved').list_object_names()\n if any(file in retrieved_filenames for file in ('cdn_last.hdf', 'cdn1')):\n can_use_remote = True\n if 'fleurinp' in inputs:\n modes = inputs.fleurinp.get_fleur_modes()\n if modes['force_theorem'] or modes['dos'] or modes['band']:\n can_use_remote 
= False\n return can_use_remote\n", "step-5": "###############################################################################\n# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #\n# All rights reserved. #\n# This file is part of the AiiDA-FLEUR package. #\n# #\n# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.flapw.de or #\n# http://aiida-fleur.readthedocs.io/en/develop/ #\n###############################################################################\n\"\"\"\nThis module contains the FleurBaseWorkChain.\nFleurBaseWorkChain is a workchain that wraps the submission of\nthe FLEUR calculation. Inheritance from the BaseRestartWorkChain\nallows to add scenarios to restart a calculation in an\nautomatic way if an expected failure occurred.\n\"\"\"\nfrom aiida import orm\nfrom aiida.common import AttributeDict\nfrom aiida.engine import while_\nfrom aiida.engine.processes.workchains import BaseRestartWorkChain\nfrom aiida.engine.processes.workchains.utils import process_handler, ProcessHandlerReport\n\nfrom aiida_fleur.tools.common_fleur_wf import optimize_calc_options\nfrom aiida_fleur.calculation.fleur import FleurCalculation\nfrom aiida_fleur.data.fleurinp import get_fleurinp_from_remote_data\n\n\nclass FleurBaseWorkChain(BaseRestartWorkChain):\n \"\"\"Workchain to run a FLEUR calculation with automated error handling and restarts\"\"\"\n _workflowversion = '0.2.1'\n _process_class = FleurCalculation\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n\n spec.expose_inputs(FleurCalculation, exclude=('metadata.options',))\n spec.input('options', valid_type=orm.Dict, help='Optional parameters to set up computational details.')\n spec.input('description', valid_type=str, required=False, non_db=True, help='Calculation description.')\n spec.input('label', valid_type=str, required=False, non_db=True, help='Calculation label.')\n spec.input(\n 'add_comp_para',\n valid_type=orm.Dict,\n default=lambda: orm.Dict(dict={\n 'only_even_MPI': False,\n 'forbid_single_mpi': False,\n 'max_queue_nodes': 20,\n 'max_queue_wallclock_sec': 86400\n }),\n help='Gives additional control over computational parameters'\n 'only_even_MPI: set to true if you want to suppress odd number of MPI processes in parallelisation.'\n 'This might speedup a calculation for machines having even number of sockets per node.'\n 'max_queue_nodes: maximal number of nodes allowed on the remote machine. Used only to automatically solve some FLEUR failures.'\n 'max_queue_wallclock_sec: maximal wallclock time allowed on the remote machine. 
Used only to automatically solve some FLEUR failures.'\n )\n\n spec.outline(\n cls.setup,\n cls.validate_inputs,\n while_(cls.should_run_process)(\n cls.run_process,\n cls.inspect_process,\n ),\n cls.results,\n )\n\n spec.expose_outputs(FleurCalculation)\n\n spec.exit_code(311,\n 'ERROR_VACUUM_SPILL_RELAX',\n message='FLEUR calculation failed because an atom spilled to the'\n 'vacuum during relaxation')\n spec.exit_code(313, 'ERROR_MT_RADII_RELAX', message='Overlapping MT-spheres during relaxation.')\n spec.exit_code(388, 'ERROR_TIME_LIMIT_NO_SOLUTION', message='Computational resources are not optimal.')\n spec.exit_code(389, 'ERROR_MEMORY_ISSUE_NO_SOLUTION', message='Computational resources are not optimal.')\n spec.exit_code(390, 'ERROR_NOT_OPTIMAL_RESOURCES', message='Computational resources are not optimal.')\n spec.exit_code(399,\n 'ERROR_SOMETHING_WENT_WRONG',\n message='FleurCalculation failed and FleurBaseWorkChain has no strategy '\n 'to resolve this')\n\n def validate_inputs(self):\n \"\"\"\n Validate inputs that might depend on each other and cannot be validated by the spec.\n Also define dictionary `inputs` in the context, that will contain the inputs for the\n calculation that will be launched in the `run_calculation` step.\n \"\"\"\n self.ctx.inputs = AttributeDict(self.exposed_inputs(FleurCalculation))\n\n self.ctx.max_queue_nodes = self.inputs.add_comp_para['max_queue_nodes']\n self.ctx.max_queue_wallclock_sec = self.inputs.add_comp_para['max_queue_wallclock_sec']\n\n input_options = self.inputs.options.get_dict()\n self.ctx.optimize_resources = input_options.pop('optimize_resources', True)\n self.ctx.inputs.metadata.options = input_options\n\n if 'description' in self.inputs:\n self.ctx.inputs.metadata.description = self.inputs.description\n else:\n self.ctx.inputs.metadata.description = ''\n if 'label' in self.inputs:\n self.ctx.inputs.metadata.label = self.inputs.label\n else:\n self.ctx.inputs.metadata.label = ''\n\n if not self.ctx.optimize_resources:\n self.ctx.can_be_optimised = False # set this for handlers to not change resources\n return\n\n resources_input = self.ctx.inputs.metadata.options['resources']\n try:\n self.ctx.num_machines = int(resources_input['num_machines'])\n self.ctx.num_mpiprocs_per_machine = int(resources_input['num_mpiprocs_per_machine'])\n except KeyError:\n self.ctx.can_be_optimised = False\n self.report('WARNING: Computation resources were not optimised.')\n else:\n try:\n self.ctx.num_cores_per_mpiproc = int(resources_input['num_cores_per_mpiproc'])\n self.ctx.use_omp = True\n self.ctx.suggest_mpi_omp_ratio = self.ctx.num_mpiprocs_per_machine / self.ctx.num_cores_per_mpiproc\n except KeyError:\n self.ctx.num_cores_per_mpiproc = 1\n self.ctx.use_omp = False\n self.ctx.suggest_mpi_omp_ratio = 1\n\n status = self.check_kpts()\n if status is None:\n self.ctx.can_be_optimised = True\n else:\n self.report('ERROR: Not optimal computational resources.')\n return status\n\n def check_kpts(self):\n \"\"\"\n This routine checks if the total number of requested cpus\n is a factor of kpts and makes an optimisation.\n\n If suggested number of num_mpiprocs_per_machine is 60% smaller than\n requested, it throws an exit code and calculation stop withour submission.\n \"\"\"\n if 'fleurinp' in self.ctx.inputs:\n fleurinp = self.ctx.inputs.fleurinp\n else:\n fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.parent_folder)\n\n only_even_MPI = self.inputs.add_comp_para['only_even_MPI']\n forbid_single_mpi = 
self.inputs.add_comp_para['forbid_single_mpi']\n try:\n machines, mpi_tasks, omp_threads, message = optimize_calc_options(self.ctx.num_machines,\n self.ctx.num_mpiprocs_per_machine,\n self.ctx.num_cores_per_mpiproc,\n self.ctx.use_omp,\n self.ctx.suggest_mpi_omp_ratio,\n fleurinp,\n only_even_MPI=only_even_MPI,\n forbid_single_mpi=forbid_single_mpi)\n except ValueError as exc:\n self.report(exc)\n return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES\n\n self.report(message)\n\n self.ctx.inputs.metadata.options['resources']['num_machines'] = machines\n self.ctx.inputs.metadata.options['resources']['num_mpiprocs_per_machine'] = mpi_tasks\n if self.ctx.use_omp:\n self.ctx.inputs.metadata.options['resources']['num_cores_per_mpiproc'] = omp_threads\n if 'environment_variables' not in self.ctx.inputs.metadata.options:\n self.ctx.inputs.metadata.options['environment_variables'] = {}\n self.ctx.inputs.metadata.options['environment_variables']['OMP_NUM_THREADS'] = str(omp_threads)\n\n @process_handler(priority=1,\n exit_codes=[\n FleurCalculation.exit_codes.ERROR_FLEUR_CALC_FAILED,\n FleurCalculation.exit_codes.ERROR_MT_RADII,\n FleurCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER,\n FleurCalculation.exit_codes.ERROR_OPENING_OUTPUTS,\n FleurCalculation.exit_codes.ERROR_NO_OUTXML,\n FleurCalculation.exit_codes.ERROR_XMLOUT_PARSING_FAILED,\n FleurCalculation.exit_codes.ERROR_RELAX_PARSING_FAILED,\n FleurCalculation.exit_codes.ERROR_MISSING_DEPENDENCY,\n ])\n def _handle_general_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('Calculation failed for a reason that can not be resolved automatically')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG)\n\n @process_handler(priority=48, exit_codes=FleurCalculation.exit_codes.ERROR_DROP_CDN)\n def _handle_dirac_equation(self, calculation):\n \"\"\"\n Sometimes relaxation calculation fails with Diraq problem which is usually caused by\n problems with reusing charge density. In this case we resubmit the calculation, dropping the input cdn.\n \"\"\"\n\n # try to drop remote folder and see if it helps\n is_fleurinp_from_relax = False\n if 'fleurinp' in self.ctx.inputs:\n if 'relax.xml' in self.ctx.inputs.fleurinp.files:\n is_fleurinp_from_relax = True\n\n if 'parent_folder' in self.ctx.inputs and is_fleurinp_from_relax:\n del self.ctx.inputs.parent_folder\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report('Calculation seems to fail due to corrupted charge density (can happen'\n 'during relaxation). I drop cdn from previous step')\n return ProcessHandlerReport(True)\n\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('Can not drop charge density. If I drop the remote folder, there will be no inp.xml')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG)\n\n @process_handler(priority=52, exit_codes=FleurCalculation.exit_codes.ERROR_VACUUM_SPILL_RELAX)\n def _handle_vacuum_spill_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('FLEUR calculation failed because an atom spilled to the vacuum during'\n 'relaxation. 
Can be fixed via RelaxBaseWorkChain.')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_VACUUM_SPILL_RELAX)\n\n @process_handler(priority=51, exit_codes=FleurCalculation.exit_codes.ERROR_MT_RADII_RELAX)\n def _handle_mt_relax_error(self, calculation):\n \"\"\"\n Calculation failed for unknown reason.\n \"\"\"\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('FLEUR calculation failed due to MT overlap. Can be fixed via RelaxBaseWorkChain')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MT_RADII_RELAX)\n\n @process_handler(priority=50, exit_codes=FleurCalculation.exit_codes.ERROR_NOT_ENOUGH_MEMORY)\n def _handle_not_enough_memory(self, calculation):\n \"\"\"\n Calculation failed due to lack of memory.\n Probably works for JURECA only, has to be tested for other systems.\n \"\"\"\n\n if not self.ctx.can_be_optimised:\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('I am not allowed to optimize your settings. Consider providing at least'\n 'num_machines and num_mpiprocs_per_machine')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MEMORY_ISSUE_NO_SOLUTION)\n\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report('Calculation failed due to lack of memory, I resubmit it with twice larger'\n ' amount of computational nodes and smaller MPI/OMP ratio')\n\n # increase number of nodes\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n\n self.ctx.suggest_mpi_omp_ratio = self.ctx.suggest_mpi_omp_ratio / 2\n\n status = self.check_kpts()\n if status is not None:\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES)\n\n if 'settings' not in self.ctx.inputs:\n settings = {}\n else:\n settings = self.ctx.inputs.settings.get_dict()\n settings.setdefault('remove_from_remotecopy_list', [])\n if 'mixing_history*' not in settings['remove_from_remotecopy_list']:\n settings['remove_from_remotecopy_list'].append('mixing_history*')\n self.ctx.inputs.settings = orm.Dict(dict=settings)\n\n #check if the cdn.hdf can be reused\n #Out of memory can also occur after a couple of iterations if the mixing_history gets too large\n remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n\n return ProcessHandlerReport(True)\n\n @process_handler(priority=47, exit_codes=FleurCalculation.exit_codes.ERROR_TIME_LIMIT)\n def _handle_time_limits(self, calculation):\n \"\"\"\n If calculation fails due to time limits, we simply resubmit it.\n \"\"\"\n from aiida.common.exceptions import NotExistent\n\n # if previous calculation failed for the same reason, do not restart\n try:\n prev_calculation_remote = calculation.base.links.get_incoming().get_node_by_label('parent_folder')\n prev_calculation_status = prev_calculation_remote.creator.exit_status\n if prev_calculation_status in FleurCalculation.get_exit_statuses(['ERROR_TIME_LIMIT']):\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True)\n except NotExistent:\n pass\n\n self.report('FleurCalculation failed due to time limits, I restart it from where it ended')\n\n # increase wallclock time\n propose_wallclock 
= self.ctx.inputs.metadata.options['max_wallclock_seconds'] * 2\n if propose_wallclock > self.ctx.max_queue_wallclock_sec:\n propose_wallclock = self.ctx.max_queue_wallclock_sec\n self.ctx.inputs.metadata.options['max_wallclock_seconds'] = propose_wallclock\n\n # increase number of nodes\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n\n remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')\n\n # resubmit providing inp.xml and cdn from the remote folder\n self.ctx.is_finished = False\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n\n return ProcessHandlerReport(True)\n\n\ndef _is_remote_reusable(inputs, calculation):\n \"\"\"\n Check whether the remote folder of the given calculation\n can be resubmitted\n \"\"\"\n can_use_remote = False\n #If no charge density file is available to restart from the calculation will except\n #with a not nice error message. So we can only reuse the charge density if these files are available\n retrieved_filenames = calculation.base.links.get_outgoing().get_node_by_label('retrieved').list_object_names()\n if any(file in retrieved_filenames for file in (\n 'cdn_last.hdf',\n 'cdn1',\n )):\n can_use_remote = True\n\n if 'fleurinp' in inputs:\n modes = inputs.fleurinp.get_fleur_modes()\n if modes['force_theorem'] or modes['dos'] or modes['band']:\n # in modes listed above it makes no sense copying cdn.hdf\n can_use_remote = False\n # without fleurinp it is harder to extract modes in this case\n # - simply try to reuse cdn.hdf and hope it works\n\n return can_use_remote\n", "step-ids": [ 4, 7, 9, 14, 15 ] }
[ 4, 7, 9, 14, 15 ]
import numpy as np
N, M = (int(x) for x in input().split())
x, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int)
for i in range(N):
    x[i], y[i], z[i] = (int(x) for x in input().split())
temp = []
for sx in (-1, 1):
    for sy in (-1, 1):
        for sz in (-1, 1):
            _x, _y, _z = sx * x, sy * y, sz * z
            T = np.sort(_x + _y + _z)[::-1][:M].sum()
            temp.append(T)
print(max(temp))
normal
{ "blob_id": "af40239551709eff02b8a1f034583ab80845d1d7", "index": 1532, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(N):\n x[i], y[i], z[i] = (int(x) for x in input().split())\n<mask token>\nfor sx in (-1, 1):\n for sy in (-1, 1):\n for sz in (-1, 1):\n _x, _y, _z = sx * x, sy * y, sz * z\n T = np.sort(_x + _y + _z)[::-1][:M].sum()\n temp.append(T)\nprint(max(temp))\n", "step-3": "<mask token>\nN, M = (int(x) for x in input().split())\nx, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int\n )\nfor i in range(N):\n x[i], y[i], z[i] = (int(x) for x in input().split())\ntemp = []\nfor sx in (-1, 1):\n for sy in (-1, 1):\n for sz in (-1, 1):\n _x, _y, _z = sx * x, sy * y, sz * z\n T = np.sort(_x + _y + _z)[::-1][:M].sum()\n temp.append(T)\nprint(max(temp))\n", "step-4": "import numpy as np\nN, M = (int(x) for x in input().split())\nx, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int\n )\nfor i in range(N):\n x[i], y[i], z[i] = (int(x) for x in input().split())\ntemp = []\nfor sx in (-1, 1):\n for sy in (-1, 1):\n for sz in (-1, 1):\n _x, _y, _z = sx * x, sy * y, sz * z\n T = np.sort(_x + _y + _z)[::-1][:M].sum()\n temp.append(T)\nprint(max(temp))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.contrib import admin
from . import models

admin.site.register(models.Comentario)

# Register your models here.
normal
{ "blob_id": "d7d94cfed0b819297069c3434c70359a327403cd", "index": 718, "step-1": "<mask token>\n", "step-2": "<mask token>\nadmin.site.register(models.Comentario)\n", "step-3": "from django.contrib import admin\nfrom . import models\nadmin.site.register(models.Comentario)\n", "step-4": "from django.contrib import admin\nfrom . import models\n\nadmin.site.register(models.Comentario)\n\n# Register your models here.\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import numpy as np
import tensorflow as tf

from arg_parser import args
from model_object import UnetModel

def main(args):
    np.random.seed(args.random_seed)
    tf.random.set_seed(args.random_seed)

    unet_model = UnetModel(args)

    unet_model.prepare_data(args)

    unet_model.create_model(args)

    unet_model.train(args)

    unet_model.load_best_model(args, load_dir=args.savedir)

    unet_model.evaluate(args)

if __name__ == "__main__":
    main(args)
normal
{ "blob_id": "588f6f78908e47e0b3f1bc42fffabad34766eede", "index": 9815, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\nif __name__ == '__main__':\n main(args)\n", "step-4": "import numpy as np\nimport tensorflow as tf\nfrom arg_parser import args\nfrom model_object import UnetModel\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\nif __name__ == '__main__':\n main(args)\n", "step-5": "import numpy as np\nimport tensorflow as tf\n\nfrom arg_parser import args\nfrom model_object import UnetModel\n\ndef main(args):\n \n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n\n unet_model = UnetModel(args) \n\n unet_model.prepare_data(args)\n\n unet_model.create_model(args)\n\n unet_model.train(args)\n\n unet_model.load_best_model(args, load_dir= args.savedir)\n\n unet_model.evaluate(args)\n\nif __name__ == \"__main__\":\n main(args)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from app import create_app
__author__ = '七月'
app = create_app()
if __name__ == '__main__':
    app.run(debug=app.config['DEBUG'])
normal
{ "blob_id": "9a6d6637cd4ecf2f6e9c8eb8e702be06e83beea4", "index": 998, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n", "step-3": "<mask token>\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n", "step-4": "from app import create_app\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#program, ktory zisti, ci zadany rok je prestupny
rok=input("Zadaj rok: ")
rok_int= int(rok)
if rok_int% 4==0:
    if rok_int % 100 != 0:
        if rok_int % 400:
            print(f'Rok {rok_int} je priestupny')
        else:
            print("rok je neprestupny")
    else:
        print("rok je prestupny")
else:
    print(f"Rok {rok_int} nie je priestupny")
#
#pridame rozsah rokov
rok_od = int(input("Zadaj rok od: "))
rok_do = int(input("Zadaj rok do: "))
for rok in range(rok_od, rok_do+1):
    if ((rok%4 == 0) and (rok % 100 != 0)) or rok %400 == 0:
        print(f"Rok {rok} je prestupny")
normal
{ "blob_id": "c9b1956d66f0b8ae8a7ce7e509259747c8b7709e", "index": 6088, "step-1": "<mask token>\n", "step-2": "<mask token>\nif rok_int % 4 == 0:\n if rok_int % 100 != 0:\n if rok_int % 400:\n print(f'Rok {rok_int} je priestupny')\n else:\n print('rok je neprestupny')\n else:\n print('rok je prestupny')\nelse:\n print(f'Rok {rok_int} nie je priestupny')\n<mask token>\nfor rok in range(rok_od, rok_do + 1):\n if rok % 4 == 0 and rok % 100 != 0 or rok % 400 == 0:\n print(f'Rok {rok} je prestupny')\n", "step-3": "rok = input('Zadaj rok: ')\nrok_int = int(rok)\nif rok_int % 4 == 0:\n if rok_int % 100 != 0:\n if rok_int % 400:\n print(f'Rok {rok_int} je priestupny')\n else:\n print('rok je neprestupny')\n else:\n print('rok je prestupny')\nelse:\n print(f'Rok {rok_int} nie je priestupny')\nrok_od = int(input('Zadaj rok od: '))\nrok_do = int(input('Zadaj rok do: '))\nfor rok in range(rok_od, rok_do + 1):\n if rok % 4 == 0 and rok % 100 != 0 or rok % 400 == 0:\n print(f'Rok {rok} je prestupny')\n", "step-4": "#program, ktory zisti, ci zadany rok je prestupny\nrok=input(\"Zadaj rok: \")\nrok_int= int(rok)\nif rok_int% 4==0:\n if rok_int % 100 != 0:\n if rok_int % 400:\n print(f'Rok {rok_int} je priestupny')\n else:\n print(\"rok je neprestupny\")\n else:\n print(\"rok je prestupny\")\nelse:\n print(f\"Rok {rok_int} nie je priestupny\")\n#\n#pridame rozsah rokov\nrok_od = int(input(\"Zadaj rok od: \"))\nrok_do = int(input(\"Zadaj rok do: \"))\nfor rok in range(rok_od, rok_do+1):\n if ((rok%4 == 0) and (rok % 100 != 0)) or rok %400 == 0:\n print(f\"Rok {rok} je prestupny\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Imports
import numpy as np

from ctf.functions2d.function2d import Function2D


# Problem
class StyblinskiTang(Function2D):
    """ Styblinski-Tang Function. """

    def __init__(self):
        """ Constructor. """
        # Information
        self.min = np.array([-2.903534, -2.903534])
        self.value = -39.16599*2.0
        self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])
        self.n = 2
        self.smooth = True
        self.info = [True, True, True]
        # Description
        self.latex_name = "Styblinski-Tang Function"
        self.latex_type = "Other"
        self.latex_cost = r'\[ f(\mathbf{x}) = \frac{1}{2} \sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \]'
        self.latex_desc = "The local minima are separated by a local maximum. There is only a single global minimum."

    def cost(self, x):
        """ Cost function. """
        # Cost
        c = np.zeros(x.shape[1:])
        # Calculate Cost
        c = 0.5*(x[0]**4.0 - 16*x[0]**2.0 + 5.0*x[0] + x[1]**4.0 - 16*x[1]**2.0 + 5.0*x[1])
        # Return Cost
        return c

    def grad(self, x):
        """ Grad function. """
        # Grad
        g = np.zeros(x.shape)
        # Calculate Grads
        g[0] = -16.0*x[0]**1.0 + 2.0*x[0]**3.0 + 2.5
        g[1] = -16.0*x[1]**1.0 + 2.0*x[1]**3.0 + 2.5
        # Return Grad
        return g

    def hess(self, x):
        """ Hess function. """
        # Hess
        h = np.zeros((2, 2) + x.shape[1:])
        # Calculate Hess
        h[0][0] = 6.0*x[0]**2.0 - 16.0
        h[0][1] = 0.0
        h[1][0] = h[0][1]
        h[1][1] = 6.0*x[1]**2.0 - 16.0
        # Return Hess
        return h
normal
{ "blob_id": "5d8715dd02feff4e13919858051abeb5b6828011", "index": 6798, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass StyblinskiTang(Function2D):\n <mask token>\n <mask token>\n <mask token>\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n g = np.zeros(x.shape)\n g[0] = -16.0 * x[0] ** 1.0 + 2.0 * x[0] ** 3.0 + 2.5\n g[1] = -16.0 * x[1] ** 1.0 + 2.0 * x[1] ** 3.0 + 2.5\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n h = np.zeros((2, 2) + x.shape[1:])\n h[0][0] = 6.0 * x[0] ** 2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0 * x[1] ** 2.0 - 16.0\n return h\n", "step-3": "<mask token>\n\n\nclass StyblinskiTang(Function2D):\n \"\"\" Styblinski-Tang Function. \"\"\"\n\n def __init__(self):\n \"\"\" Constructor. \"\"\"\n self.min = np.array([-2.903534, -2.903534])\n self.value = -39.16599 * 2.0\n self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])\n self.n = 2\n self.smooth = True\n self.info = [True, True, True]\n self.latex_name = 'Styblinski-Tang Function'\n self.latex_type = 'Other'\n self.latex_cost = (\n '\\\\[ f(\\\\mathbf{x}) = \\\\frac{1}{2} \\\\sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \\\\]'\n )\n self.latex_desc = (\n 'The local minima are separated by a local maximum. There is only a single global minimum.'\n )\n\n def cost(self, x):\n \"\"\" Cost function. \"\"\"\n c = np.zeros(x.shape[1:])\n c = 0.5 * (x[0] ** 4.0 - 16 * x[0] ** 2.0 + 5.0 * x[0] + x[1] ** \n 4.0 - 16 * x[1] ** 2.0 + 5.0 * x[1])\n return c\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n g = np.zeros(x.shape)\n g[0] = -16.0 * x[0] ** 1.0 + 2.0 * x[0] ** 3.0 + 2.5\n g[1] = -16.0 * x[1] ** 1.0 + 2.0 * x[1] ** 3.0 + 2.5\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n h = np.zeros((2, 2) + x.shape[1:])\n h[0][0] = 6.0 * x[0] ** 2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0 * x[1] ** 2.0 - 16.0\n return h\n", "step-4": "import numpy as np\nfrom ctf.functions2d.function2d import Function2D\n\n\nclass StyblinskiTang(Function2D):\n \"\"\" Styblinski-Tang Function. \"\"\"\n\n def __init__(self):\n \"\"\" Constructor. \"\"\"\n self.min = np.array([-2.903534, -2.903534])\n self.value = -39.16599 * 2.0\n self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])\n self.n = 2\n self.smooth = True\n self.info = [True, True, True]\n self.latex_name = 'Styblinski-Tang Function'\n self.latex_type = 'Other'\n self.latex_cost = (\n '\\\\[ f(\\\\mathbf{x}) = \\\\frac{1}{2} \\\\sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \\\\]'\n )\n self.latex_desc = (\n 'The local minima are separated by a local maximum. There is only a single global minimum.'\n )\n\n def cost(self, x):\n \"\"\" Cost function. \"\"\"\n c = np.zeros(x.shape[1:])\n c = 0.5 * (x[0] ** 4.0 - 16 * x[0] ** 2.0 + 5.0 * x[0] + x[1] ** \n 4.0 - 16 * x[1] ** 2.0 + 5.0 * x[1])\n return c\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n g = np.zeros(x.shape)\n g[0] = -16.0 * x[0] ** 1.0 + 2.0 * x[0] ** 3.0 + 2.5\n g[1] = -16.0 * x[1] ** 1.0 + 2.0 * x[1] ** 3.0 + 2.5\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n h = np.zeros((2, 2) + x.shape[1:])\n h[0][0] = 6.0 * x[0] ** 2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0 * x[1] ** 2.0 - 16.0\n return h\n", "step-5": "# Imports\nimport numpy as np\n\nfrom ctf.functions2d.function2d import Function2D\n\n\n\n# Problem\nclass StyblinskiTang(Function2D):\n \"\"\" Styblinski-Tang Function. \"\"\"\n\n def __init__(self):\n \"\"\" Constructor. 
\"\"\"\n # Information\n self.min = np.array([-2.903534, -2.903534])\n self.value = -39.16599*2.0\n self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])\n self.n = 2\n self.smooth = True\n self.info = [True, True, True]\n # Description\n self.latex_name = \"Styblinski-Tang Function\"\n self.latex_type = \"Other\"\n self.latex_cost = r'\\[ f(\\mathbf{x}) = \\frac{1}{2} \\sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \\]'\n self.latex_desc = \"The local minima are separated by a local maximum. There is only a single global minimum.\"\n\n def cost(self, x):\n \"\"\" Cost function. \"\"\"\n # Cost\n c = np.zeros(x.shape[1:])\n # Calculate Cost\n c = 0.5*(x[0]**4.0 - 16*x[0]**2.0 + 5.0*x[0] + x[1]**4.0 - 16*x[1]**2.0 + 5.0*x[1])\n # Return Cost\n return c\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n # Grad\n g = np.zeros(x.shape)\n # Calculate Grads\n g[0] = -16.0*x[0]**1.0 + 2.0*x[0]**3.0 + 2.5\n g[1] = -16.0*x[1]**1.0 + 2.0*x[1]**3.0 + 2.5\n # Return Grad\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n # Hess\n h = np.zeros((2, 2) + x.shape[1:])\n # Calculate Hess\n h[0][0] = 6.0*x[0]**2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0*x[1]**2.0 - 16.0\n # Return Hess\n return h", "step-ids": [ 0, 3, 6, 7, 8 ] }
[ 0, 3, 6, 7, 8 ]
# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction
# (up, down, left, right, or diagonally) in the 20×20 grid?

import numpy as np
data = np.genfromtxt("problem_11_matrix.txt", delimiter=" ")

# find greatest product horizontally
max_product_hor = 0
for i in range(0, len(data[0, :])-3):
    for j in range(0, len(data[0, :])-3):
        product_hor = data[j, i] * data[j, i+1] * data[j, i+2] * data[j, i+3]
        if product_hor > max_product_hor:
            max_product_hor = product_hor
# print("The greatest product horizontally is {}. " .format(max_product_hor))

# find greatest product vertically
max_product_ver = 0
for i in range(0, len(data[:, 0])-3):
    for j in range(0, len(data[:, 0])-3):
        product_ver = data[i, j] * data[i+1, j] * data[i+2, j] * data[i+3, j]
        if product_ver > max_product_ver:
            max_product_ver = product_ver
# print("The greatest product vertically is {}. " .format(max_product_ver))

# find greatest product diagonally
max_product_dia = 0
for j in range(0, len(data[0, :])-3):
    for i in range(0, len(data[0, :])-3):
        product_dia = data[i, j] * data[i+1, j+1] * data[i+2, j+2] * data[i+3, j+3]
        if product_dia > max_product_dia:
            max_product_dia = product_dia
# print("The greatest product diagonally is {}. " .format(max_product_dia))

max_product_dia_2 = 0
for j in range(0, len(data[0, :])-3):
    for i in range(2, len(data[0, :])-1):
        product_dia_2 = data[i, j] * data[i-1, j+1] * data[i-2, j+2] * data[i-3, j+3]
        if product_dia_2 > max_product_dia_2:
            max_product_dia_2 = product_dia_2
# print("The greatest product diagonally is {}. " .format(max_product_dia_2))

max_value = max(max_product_hor, max_product_ver, max_product_dia, max_product_dia_2)

print("The greatest product in the same direction is {}." .format(int(max_value)))
normal
{ "blob_id": "bacaaf5c91232d85f451c2c17a42cd2ec6966684", "index": 1499, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\n<mask token>\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\n<mask token>\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\n<mask token>\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\n<mask token>\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n", "step-3": "<mask token>\ndata = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')\nmax_product_hor = 0\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\nmax_product_dia = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\nmax_value = max(max_product_hor, max_product_ver, max_product_dia,\n max_product_dia_2)\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n", "step-4": "import numpy as np\ndata = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')\nmax_product_hor = 0\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\nmax_product_dia = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 
1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\nmax_value = max(max_product_hor, max_product_ver, max_product_dia,\n max_product_dia_2)\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n", "step-5": "# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.\n# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.\n# What is the greatest product of four adjacent numbers in the same direction\n# (up, down, left, right, or diagonally) in the 20×20 grid?\n\nimport numpy as np\ndata = np.genfromtxt(\"problem_11_matrix.txt\", delimiter=\" \")\n\n# find greatest product horizontally\nmax_product_hor = 0\nfor i in range(0, len(data[0, :])-3):\n for j in range(0, len(data[0, :])-3):\n product_hor = data[j, i] * data[j, i+1] * data[j, i+2] * data[j, i+3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\n# print(\"The greatest product horizontally is {}. \" .format(max_product_hor))\n\n# find greatest product vertically\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0])-3):\n for j in range(0, len(data[:, 0])-3):\n product_ver = data[i, j] * data[i+1, j] * data[i+2, j] * data[i+3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\n# print(\"The greatest product vertically is {}. \" .format(max_product_ver))\n\n# find greatest product diagonally\nmax_product_dia = 0\nfor j in range(0, len(data[0, :])-3):\n for i in range(0, len(data[0, :])-3):\n product_dia = data[i, j] * data[i+1, j+1] * data[i+2, j+2] * data[i+3, j+3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\n# print(\"The greatest product diagonally is {}. \" .format(max_product_dia))\n\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :])-3):\n for i in range(2, len(data[0, :])-1):\n product_dia_2 = data[i, j] * data[i-1, j+1] * data[i-2, j+2] * data[i-3, j+3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\n# print(\"The greatest product diagonally is {}. \" .format(max_product_dia_2))\n\nmax_value = max(max_product_hor, max_product_ver, max_product_dia, max_product_dia_2)\n\nprint(\"The greatest product in the same direction is {}.\" .format(int(max_value)))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox
from matplotlib.path import Path
import json

def cLineGraph(j_file):
    data = []

    with open(j_file) as f:
        for line in f:
            data.append(json.loads(line))
    data = data[0]

    in_other = 0
    in_picture = 1
    in_text = 2

    values = []
    time = []
    x_coords = []
    x_times = []

    page_turns = []
    pages = []

    pic = []
    text = []
    p = 1
    t0 = 0
    first = 0

    for i in range(0, len(data)):
        if data[i].get('type') == 'Picture':
            pic = data[i]
            #print(pic, i)
        if data[i].get('type') == 'Text':
            text = data[i]
            if first == 0:
                page_turns.append(0)
            else:
                page_turns.append(data[i+1].get('timestamp') - t0)
            pages.append(p)
            p = p + 1
            #print(text, i)
        if data[i].get('type') == 'SampleGaze' or data[i].get('type') == 'SampleFixation':
        #if data[i].get('type') == 'SampleFixation': # comment out line above and use this one for only fixation data
            if first == 0:
                t0 = data[i].get('timestamp')
                first = 1
            time.append(data[i].get('timestamp') - t0)
            x = data[i].get('x')
            y = data[i].get('y')
            if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb') and y > pic.get('pt'):
                values.append(in_picture)
            elif x < text.get('tr') and x > text.get('tl') and y < text.get('tb') and y > text.get('tt'):
                values.append(in_text)
                x_coords.append(x)
                x_times.append(data[i].get('timestamp') - t0)
            else:
                values.append(in_other)
    d = []
    v = values[0]
    vs = []
    ts = []
    vs.append(v)
    ts.append(time[0])
    for i in range(1, len(values)):
        if values[i] == v:
            vs.append(v)
            ts.append(time[i])
        else:
            d.append([ts, vs])
            vs = []
            ts = []
            v = values[i]
            vs.append(v)
            ts.append(time[i])
    for i in range(0, len(x_times)):
        x_coords[i] = ((1/1920.0)*(x_coords[i])) + 1.5

    for plot in d:
        if plot[1][0] == 0: # other
            plt.plot(plot[0], plot[1], 'k', linewidth=10)
        elif plot[1][0] == 1: # picture
            plt.plot(plot[0], plot[1], 'b', linewidth=10)
        elif plot[1][0] == 2:
            plt.plot(plot[0], plot[1], 'g', linewidth=10)

    # THESE TWO LINES IMPLEMENT THE READING POINT PLOT FUNCTIONALITY
    #plt.plot(x_times, x_coords, 'go')
    #plt.plot(x_times, x_coords, 'g')

    plt.axis([0, time[-1], -0.5, 2.5])
    plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')
    plt.xticks(page_turns, pages, size='small')
    plt.xlabel('Page')
    plt.ylabel('Eye Location on Page')
    plt.savefig('linegraph' + j_file[11:-5] + '.png')
normal
{ "blob_id": "319af5232c043d77a9d63ab1efa62d857da6db23", "index": 1508, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef cLineGraph(j_file):\n data = []\n with open(j_file) as f:\n for line in f:\n data.append(json.loads(line))\n data = data[0]\n in_other = 0\n in_picture = 1\n in_text = 2\n values = []\n time = []\n x_coords = []\n x_times = []\n page_turns = []\n pages = []\n pic = []\n text = []\n p = 1\n t0 = 0\n first = 0\n for i in range(0, len(data)):\n if data[i].get('type') == 'Picture':\n pic = data[i]\n if data[i].get('type') == 'Text':\n text = data[i]\n if first == 0:\n page_turns.append(0)\n else:\n page_turns.append(data[i + 1].get('timestamp') - t0)\n pages.append(p)\n p = p + 1\n if data[i].get('type') == 'SampleGaze' or data[i].get('type'\n ) == 'SampleFixation':\n if first == 0:\n t0 = data[i].get('timestamp')\n first = 1\n time.append(data[i].get('timestamp') - t0)\n x = data[i].get('x')\n y = data[i].get('y')\n if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'\n ) and y > pic.get('pt'):\n values.append(in_picture)\n elif x < text.get('tr') and x > text.get('tl') and y < text.get(\n 'tb') and y > text.get('tt'):\n values.append(in_text)\n x_coords.append(x)\n x_times.append(data[i].get('timestamp') - t0)\n else:\n values.append(in_other)\n d = []\n v = values[0]\n vs = []\n ts = []\n vs.append(v)\n ts.append(time[0])\n for i in range(1, len(values)):\n if values[i] == v:\n vs.append(v)\n ts.append(time[i])\n else:\n d.append([ts, vs])\n vs = []\n ts = []\n v = values[i]\n vs.append(v)\n ts.append(time[i])\n for i in range(0, len(x_times)):\n x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5\n for plot in d:\n if plot[1][0] == 0:\n plt.plot(plot[0], plot[1], 'k', linewidth=10)\n elif plot[1][0] == 1:\n plt.plot(plot[0], plot[1], 'b', linewidth=10)\n elif plot[1][0] == 2:\n plt.plot(plot[0], plot[1], 'g', linewidth=10)\n plt.axis([0, time[-1], -0.5, 2.5])\n plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')\n plt.xticks(page_turns, pages, size='small')\n plt.xlabel('Page')\n plt.ylabel('Eye Location on Page')\n plt.savefig('linegraph' + j_file[11:-5] + '.png')\n", "step-3": "import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nfrom matplotlib.path import Path\nimport json\n\n\ndef cLineGraph(j_file):\n data = []\n with open(j_file) as f:\n for line in f:\n data.append(json.loads(line))\n data = data[0]\n in_other = 0\n in_picture = 1\n in_text = 2\n values = []\n time = []\n x_coords = []\n x_times = []\n page_turns = []\n pages = []\n pic = []\n text = []\n p = 1\n t0 = 0\n first = 0\n for i in range(0, len(data)):\n if data[i].get('type') == 'Picture':\n pic = data[i]\n if data[i].get('type') == 'Text':\n text = data[i]\n if first == 0:\n page_turns.append(0)\n else:\n page_turns.append(data[i + 1].get('timestamp') - t0)\n pages.append(p)\n p = p + 1\n if data[i].get('type') == 'SampleGaze' or data[i].get('type'\n ) == 'SampleFixation':\n if first == 0:\n t0 = data[i].get('timestamp')\n first = 1\n time.append(data[i].get('timestamp') - t0)\n x = data[i].get('x')\n y = data[i].get('y')\n if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb'\n ) and y > pic.get('pt'):\n values.append(in_picture)\n elif x < text.get('tr') and x > text.get('tl') and y < text.get(\n 'tb') and y > text.get('tt'):\n values.append(in_text)\n x_coords.append(x)\n x_times.append(data[i].get('timestamp') - t0)\n else:\n values.append(in_other)\n d = []\n v = values[0]\n vs = []\n ts = []\n vs.append(v)\n 
ts.append(time[0])\n for i in range(1, len(values)):\n if values[i] == v:\n vs.append(v)\n ts.append(time[i])\n else:\n d.append([ts, vs])\n vs = []\n ts = []\n v = values[i]\n vs.append(v)\n ts.append(time[i])\n for i in range(0, len(x_times)):\n x_coords[i] = 1 / 1920.0 * x_coords[i] + 1.5\n for plot in d:\n if plot[1][0] == 0:\n plt.plot(plot[0], plot[1], 'k', linewidth=10)\n elif plot[1][0] == 1:\n plt.plot(plot[0], plot[1], 'b', linewidth=10)\n elif plot[1][0] == 2:\n plt.plot(plot[0], plot[1], 'g', linewidth=10)\n plt.axis([0, time[-1], -0.5, 2.5])\n plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')\n plt.xticks(page_turns, pages, size='small')\n plt.xlabel('Page')\n plt.ylabel('Eye Location on Page')\n plt.savefig('linegraph' + j_file[11:-5] + '.png')\n", "step-4": "import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nfrom matplotlib.path import Path\nimport json\n\ndef cLineGraph(j_file):\n data = []\n\n with open(j_file) as f:\n for line in f:\n data.append(json.loads(line))\n data = data[0]\n\n in_other = 0\n in_picture = 1\n in_text = 2\n\n values = []\n time = []\n x_coords = []\n x_times = []\n\n page_turns = []\n pages = []\n\n pic = []\n text = []\n p = 1\n t0 = 0\n first = 0\n\n for i in range(0, len(data)):\n if data[i].get('type') == 'Picture':\n pic = data[i]\n #print(pic, i)\n if data[i].get('type') == 'Text':\n text = data[i]\n if first == 0:\n page_turns.append(0)\n else:\n page_turns.append(data[i+1].get('timestamp') - t0)\n pages.append(p)\n p = p + 1\n #print(text, i)\n if data[i].get('type') == 'SampleGaze' or data[i].get('type') == 'SampleFixation':\n #if data[i].get('type') == 'SampleFixation': # comment out line above and use this one for only fixation data\n if first == 0:\n t0 = data[i].get('timestamp')\n first = 1\n time.append(data[i].get('timestamp') - t0)\n x = data[i].get('x')\n y = data[i].get('y')\n if x < pic.get('pr') and x > pic.get('pl') and y < pic.get('pb') and y > pic.get('pt'):\n values.append(in_picture)\n elif x < text.get('tr') and x > text.get('tl') and y < text.get('tb') and y > text.get('tt'):\n values.append(in_text)\n x_coords.append(x)\n x_times.append(data[i].get('timestamp') - t0)\n else:\n values.append(in_other)\n d = []\n v = values[0]\n vs = []\n ts = []\n vs.append(v)\n ts.append(time[0])\n for i in range(1, len(values)):\n if values[i] == v:\n vs.append(v)\n ts.append(time[i])\n else:\n d.append([ts, vs])\n vs = []\n ts = []\n v = values[i]\n vs.append(v)\n ts.append(time[i])\n for i in range(0, len(x_times)):\n x_coords[i] = ((1/1920.0)*(x_coords[i])) + 1.5\n\n for plot in d:\n if plot[1][0] == 0: # other\n plt.plot(plot[0], plot[1], 'k', linewidth=10)\n elif plot[1][0] == 1: # picture\n plt.plot(plot[0], plot[1], 'b', linewidth=10)\n elif plot[1][0] == 2:\n plt.plot(plot[0], plot[1], 'g', linewidth=10)\n \n # THESE TWO LINES IMPLEMENT THE READING POINT PLOT FUNCTIONALITY \n #plt.plot(x_times, x_coords, 'go')\n #plt.plot(x_times, x_coords, 'g')\n\n plt.axis([0, time[-1], -0.5, 2.5])\n plt.yticks([0, 1, 2], ['Other', 'Picture', 'Text'], size='small')\n plt.xticks(page_turns, pages, size='small')\n plt.xlabel('Page')\n plt.ylabel('Eye Location on Page')\n plt.savefig('linegraph' + j_file[11:-5] + '.png')", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
""" HLS: Check if Twin Granule Exists """ from typing import Dict import os import re import boto3 from botocore.errorfactory import ClientError from datetime import date s3 = boto3.client("s3") bucket = os.getenv("SENTINEL_INPUT_BUCKET", None) print(bucket) if bucket is None: raise Exception("No Input Bucket set") def handler(event: Dict, context: Dict): """AWS Lambda handler.""" granule = event.get("granule") prefix = granule[0:-6] print(prefix) response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix,) print(response) granules = [] contents = response["Contents"] for obj in contents: granules.append(obj["Key"][0:-4]) granule_str = ",".join(granules) output = { "granule": granule_str, } return output
normal
{ "blob_id": "d2b05c5653ca6c6b7219f6c0393e81c9425b5977", "index": 279, "step-1": "<mask token>\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n", "step-2": "<mask token>\nprint(bucket)\nif bucket is None:\n raise Exception('No Input Bucket set')\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n", "step-3": "<mask token>\ns3 = boto3.client('s3')\nbucket = os.getenv('SENTINEL_INPUT_BUCKET', None)\nprint(bucket)\nif bucket is None:\n raise Exception('No Input Bucket set')\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n", "step-4": "<mask token>\nfrom typing import Dict\nimport os\nimport re\nimport boto3\nfrom botocore.errorfactory import ClientError\nfrom datetime import date\ns3 = boto3.client('s3')\nbucket = os.getenv('SENTINEL_INPUT_BUCKET', None)\nprint(bucket)\nif bucket is None:\n raise Exception('No Input Bucket set')\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n", "step-5": "\"\"\"\nHLS: Check if Twin Granule Exists\n\"\"\"\nfrom typing import Dict\nimport os\nimport re\nimport boto3\nfrom botocore.errorfactory import ClientError\nfrom datetime import date\n\ns3 = boto3.client(\"s3\")\nbucket = os.getenv(\"SENTINEL_INPUT_BUCKET\", None)\nprint(bucket)\nif bucket is None:\n raise Exception(\"No Input Bucket set\")\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get(\"granule\")\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix,)\n print(response)\n granules = []\n contents = response[\"Contents\"]\n for obj in contents:\n granules.append(obj[\"Key\"][0:-4])\n\n granule_str = \",\".join(granules)\n\n output = {\n \"granule\": granule_str,\n }\n return output\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# drop data to file filter
import tarr.compiler_base


def format_data(data):
    return '{0.id}: {0.payload}'.format(data)


class WRITE_TO_FILE(tarr.compiler_base.Instruction):

    @property
    def __name__(self):
        return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)

    def __init__(self, filename, formatter=format_data):
        self.format = formatter
        self.filename = filename

    def run(self, runner, data):
        # NOTE: we need to do writing in UNBUFFERED mode (buffering=0)
        # as potentially there are other processes writing to the same file
        # *NOW*
        with open(self.filename, 'ab', buffering=0) as f:
            f.write(self.format(data) + '\n')
        return data

    def clone(self):
        return self.__class__(filename=self.filename, formatter=self.format)
normal
{ "blob_id": "75393d39b147097a7ac1d82938ac102491ea9441", "index": 8469, "step-1": "<mask token>\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n <mask token>\n", "step-2": "<mask token>\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-3": "<mask token>\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-4": "import tarr.compiler_base\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-5": "# drop data to file filter\nimport tarr.compiler_base\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n # NOTE: we need to do writing in UNBUFFERED mode (buffering=0)\n # as potentially there are other processes writing to the same file\n # *NOW*\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
#   * Rearrange models' order
#   * Make sure each model has one field with primary_key=True
#   * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
#   * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models


class Agentname(models.Model):
    name_id = models.IntegerField(primary_key=True)
    firstname = models.CharField(max_length=255)
    lastname = models.CharField(max_length=255)
    email = models.CharField(max_length=255, blank=True, null=True)
    phone = models.CharField(max_length=13)
    pollingunit_uniqueid = models.IntegerField()

    class Meta:
        managed = False
        db_table = 'agentname'


class AnnouncedLgaResults(models.Model):
    result_id = models.IntegerField(primary_key=True)
    lga_name = models.CharField(max_length=50)
    party_abbreviation = models.CharField(max_length=4)
    party_score = models.IntegerField()
    entered_by_user = models.CharField(max_length=50)
    date_entered = models.DateTimeField()
    user_ip_address = models.CharField(max_length=50)

    class Meta:
        managed = False
        db_table = 'announced_lga_results'


class AnnouncedPuResults(models.Model):
    result_id = models.IntegerField(primary_key=True)
    polling_unit_uniqueid = models.CharField(max_length=50)
    party_abbreviation = models.CharField(max_length=4)
    party_score = models.IntegerField()
    entered_by_user = models.CharField(max_length=50)
    date_entered = models.DateTimeField()
    user_ip_address = models.CharField(max_length=50)

    class Meta:
        managed = False
        db_table = 'announced_pu_results'


class AnnouncedStateResults(models.Model):
    result_id = models.IntegerField(primary_key=True)
    state_name = models.CharField(max_length=50)
    party_abbreviation = models.CharField(max_length=4)
    party_score = models.IntegerField()
    entered_by_user = models.CharField(max_length=50)
    date_entered = models.DateTimeField()
    user_ip_address = models.CharField(max_length=50)

    class Meta:
        managed = False
        db_table = 'announced_state_results'


class AnnouncedWardResults(models.Model):
    result_id = models.IntegerField(primary_key=True)
    ward_name = models.CharField(max_length=50)
    party_abbreviation = models.CharField(max_length=4)
    party_score = models.IntegerField()
    entered_by_user = models.CharField(max_length=50)
    date_entered = models.DateTimeField()
    user_ip_address = models.CharField(max_length=50)

    class Meta:
        managed = False
        db_table = 'announced_ward_results'


class Lga(models.Model):
    uniqueid = models.IntegerField(primary_key=True)
    lga_id = models.IntegerField()
    lga_name = models.CharField(max_length=50)
    state_id = models.IntegerField()
    lga_description = models.TextField(blank=True, null=True)
    entered_by_user = models.CharField(max_length=50)
    date_entered = models.DateTimeField()
    user_ip_address = models.CharField(max_length=50)

    class Meta:
        managed = False
        db_table = 'lga'


class Party(models.Model):
    id = models.IntegerField(primary_key=True)
    partyid = models.CharField(max_length=11)
    partyname = models.CharField(max_length=11)

    class Meta:
        managed = False
        db_table = 'party'


class PollingUnit(models.Model):
    uniqueid = models.IntegerField(primary_key=True)
    polling_unit_id = models.IntegerField()
    ward_id = models.IntegerField()
    lga_id = models.IntegerField()
    uniquewardid = models.IntegerField(blank=True, null=True)
    polling_unit_number = models.CharField(max_length=50, blank=True, null=True)
    polling_unit_name = models.CharField(max_length=50, blank=True, null=True)
    polling_unit_description = models.TextField(blank=True, null=True)
    lat = models.CharField(max_length=255, blank=True, null=True)
    lon = models.CharField(max_length=255, blank=True, null=True)
    entered_by_user = models.CharField(max_length=50, blank=True, null=True)
    date_entered = models.DateTimeField(blank=True, null=True)
    user_ip_address = models.CharField(max_length=50, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'polling_unit'


class States(models.Model):
    state_id = models.IntegerField(primary_key=True)
    state_name = models.CharField(max_length=50)

    class Meta:
        managed = False
        db_table = 'states'


class Ward(models.Model):
    uniqueid = models.IntegerField(primary_key=True)
    ward_id = models.IntegerField()
    ward_name = models.CharField(max_length=50)
    lga_id = models.IntegerField()
    ward_description = models.TextField(blank=True, null=True)
    entered_by_user = models.CharField(max_length=50)
    date_entered = models.DateTimeField()
    user_ip_address = models.CharField(max_length=50)

    class Meta:
        managed = False
        db_table = 'ward'
normal
{ "blob_id": "5ce5fbfa33c241fc316d5e414df01a39bfc9be18", "index": 7063, "step-1": "<mask token>\n\n\nclass AnnouncedPuResults(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-2": "<mask token>\n\n\nclass AnnouncedLgaResults(models.Model):\n <mask token>\n <mask token>\n <mask 
token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = 
models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-3": "<mask token>\n\n\nclass Agentname(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'agentname'\n\n\nclass AnnouncedLgaResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n lga_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = 
models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-4": "from django.db import models\n\n\nclass Agentname(models.Model):\n name_id = models.IntegerField(primary_key=True)\n firstname = models.CharField(max_length=255)\n lastname = models.CharField(max_length=255)\n email = models.CharField(max_length=255, blank=True, null=True)\n phone = models.CharField(max_length=13)\n pollingunit_uniqueid = models.IntegerField()\n\n\n class Meta:\n managed = False\n db_table = 'agentname'\n\n\nclass AnnouncedLgaResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n lga_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = 
models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-5": "# This is an auto-generated Django model module.\n# You'll have to do the following manually to clean this up:\n# * Rearrange models' order\n# * Make sure each model has one field with primary_key=True\n# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior\n# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table\n# Feel free to rename the models, but don't rename db_table values or field names.\nfrom django.db import models\n\n\nclass Agentname(models.Model):\n name_id = models.IntegerField(primary_key=True)\n firstname = models.CharField(max_length=255)\n lastname = models.CharField(max_length=255)\n email = models.CharField(max_length=255, blank=True, null=True)\n phone = models.CharField(max_length=13)\n pollingunit_uniqueid = models.IntegerField()\n\n class Meta:\n managed = False\n db_table = 'agentname'\n\n\nclass AnnouncedLgaResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n lga_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = 
models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-ids": [ 15, 17, 19, 21, 22 ] }
[ 15, 17, 19, 21, 22 ]
''' Given two integers, a dividend and a divisor, divide the two numbers without using multiplication, division, or the mod operator. Return the quotient obtained by dividing dividend by divisor. Link: https://leetcode-cn.com/problems/divide-two-integers ''' # The problem looks simple, but it is full of pitfalls and writing robust code for it is not easy # My own idea is to keep lower and upper bounds and narrow the range to find the answer def division(dividend, divisor): temp = 0 for i in range(dividend + 1): temp += abs(divisor) if temp > abs(dividend): if ((dividend ^ divisor) >> divisor.__sizeof__())^1 > 0: return i else : return -i return 2**31 - 1 def division_v2(dividend, divisor): def get_add_num(num, times): sum = 0 for i in range(times): sum += num return sum low = 0 up = dividend while low < up: mid = round((low + up) / 2) if get_add_num(divisor, mid) < dividend: low = mid else: up = mid return mid if __name__ == '__main__': # print(division(2147483647, 1)) print(division_v2(3, 1))
normal
{ "blob_id": "edb80652de641a1a6cbb37a60cc236cd7828a96e", "index": 8151, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef division_v2(dividend, divisor):\n\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef division(dividend, divisor):\n temp = 0\n for i in range(dividend + 1):\n temp += abs(divisor)\n if temp > abs(dividend):\n if (dividend ^ divisor) >> divisor.__sizeof__() ^ 1 > 0:\n return i\n else:\n return -i\n return 2 ** 31 - 1\n\n\ndef division_v2(dividend, divisor):\n\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef division(dividend, divisor):\n temp = 0\n for i in range(dividend + 1):\n temp += abs(divisor)\n if temp > abs(dividend):\n if (dividend ^ divisor) >> divisor.__sizeof__() ^ 1 > 0:\n return i\n else:\n return -i\n return 2 ** 31 - 1\n\n\ndef division_v2(dividend, divisor):\n\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\nif __name__ == '__main__':\n print(division_v2(3, 1))\n", "step-5": "\n'''\n给定两个整数,被除数 dividend 和除数 divisor。将两数相除,要求不使用乘法、除法和 mod 运算符。\n\n返回被除数 dividend 除以除数 divisor 得到的商\n\n链接:https://leetcode-cn.com/problems/divide-two-integers\n'''\n\n# 该题看起来也不难,但是其中坑很多,想要写出健壮的代码并不容易\n# 我个人思考可以考虑使用上下界,不断缩小范围来确定\ndef division(dividend, divisor):\n temp = 0\n for i in range(dividend + 1):\n temp += abs(divisor)\n if temp > abs(dividend):\n if ((dividend ^ divisor) >> divisor.__sizeof__())^1 > 0:\n return i\n else :\n return -i\n return 2**31 - 1\n\n\ndef division_v2(dividend, divisor):\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\nif __name__ == '__main__':\n # print(division(2147483647, 1))\n print(division_v2(3, 1))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so the first ten triangle numbers are: # 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... # By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word. # Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words? # ANSWER: 162 import math import time # Get Data fin=open("D:\OneDrive\Study\Self learning\Coding\Project Euler\data\Problem 42\p042_words.txt","r") string_content=fin.readline() fin.close() char_dict={"A":1,"B":2,"C":3,"D":4,"E":5,"F":6,"G":7,"H":8,"I":9,"J":10,"K":11,"L":12,"M":13, "N":14,"O":15,"P":16,"Q":17,"R":18,"S":19,"T":20,"U":21,"V":22,"W":23,"X":24,"Y":25,"Z":26} # Split data into elements of a list string_list=list() string_list=string_content.replace('"','').split(',') # Check whether a word value is a triangle number def is_triangle_number(n): # Check if the positive root of k(k+1)/2 = n is an integer root=(-1+math.sqrt(1+8.0*n))/2 if root.is_integer(): return True return False def calculation(): count=0 # For each word in string list for word in string_list: sum=0 # For each char in each word for char in word: sum += char_dict[char] if is_triangle_number(sum): count +=1 print(count) calculation()
normal
{ "blob_id": "61019a5439a6f0c1aee51db9b048a26fb9b5bf5d", "index": 8257, "step-1": "<mask token>\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\n<mask token>\n", "step-2": "<mask token>\nfin.close()\n<mask token>\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\ncalculation()\n", "step-3": "<mask token>\nfin = open(\n 'D:\\\\OneDrive\\\\Study\\\\Self learning\\\\Coding\\\\Project Euler\\\\data\\\\Problem 42\\\\p042_words.txt'\n , 'r')\nstring_content = fin.readline()\nfin.close()\nchar_dict = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8,\n 'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16,\n 'Q': 17, 'R': 18, 'S': 19, 'T': 20, 'U': 21, 'V': 22, 'W': 23, 'X': 24,\n 'Y': 25, 'Z': 26}\nstring_list = list()\nstring_list = string_content.replace('\"', '').split(',')\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\ncalculation()\n", "step-4": "import math\nimport time\nfin = open(\n 'D:\\\\OneDrive\\\\Study\\\\Self learning\\\\Coding\\\\Project Euler\\\\data\\\\Problem 42\\\\p042_words.txt'\n , 'r')\nstring_content = fin.readline()\nfin.close()\nchar_dict = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8,\n 'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16,\n 'Q': 17, 'R': 18, 'S': 19, 'T': 20, 'U': 21, 'V': 22, 'W': 23, 'X': 24,\n 'Y': 25, 'Z': 26}\nstring_list = list()\nstring_list = string_content.replace('\"', '').split(',')\n\n\ndef is_triangle_number(n):\n root = (-1 + math.sqrt(1 + 8.0 * n)) / 2\n if root.is_integer():\n return True\n return False\n\n\ndef calculation():\n count = 0\n for word in string_list:\n sum = 0\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count += 1\n print(count)\n\n\ncalculation()\n", "step-5": "# The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so the first ten triangle numbers are:\n# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...\n# By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. 
If the word value is a triangle number then we shall call the word a triangle word.\n# Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?\n# ANSWER: 162\n\nimport math\nimport time\n# Get Data\nfin=open(\"D:\\OneDrive\\Study\\Self learning\\Coding\\Project Euler\\data\\Problem 42\\p042_words.txt\",\"r\")\nstring_content=fin.readline()\nfin.close()\n\nchar_dict={\"A\":1,\"B\":2,\"C\":3,\"D\":4,\"E\":5,\"F\":6,\"G\":7,\"H\":8,\"I\":9,\"J\":10,\"K\":11,\"L\":12,\"M\":13, \"N\":14,\"O\":15,\"P\":16,\"Q\":17,\"R\":18,\"S\":19,\"T\":20,\"U\":21,\"V\":22,\"W\":23,\"X\":24,\"Y\":25,\"Z\":26}\n\n# Split data into element of a list\nstring_list=list()\nstring_list=string_content.replace('\"','').split(',')\n\n# Check if it is tran\ndef is_triangle_number(n):\n # Check if root is integer\n root=(-1+math.sqrt(1+8.0*n))/2\n if root.is_integer():\n return True\n return False \n\ndef calculation():\n count=0\n # For each word in string list\n for word in string_list:\n sum=0\n # For each char in each word\n for char in word:\n sum += char_dict[char]\n if is_triangle_number(sum):\n count +=1\n print(count)\ncalculation()\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
def check_integer(a): if type(a) != int: print("please input an integer") exit() def is_even(a): check_integer(a) if a % 2 == 0: print("true") return True else: print("false") return False is_even(2) is_even(3) is_even("cat")
normal
{ "blob_id": "92391f17380b2e09cc9b3913f15ce35189d9893d", "index": 8241, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef is_even(a):\n check_integer(a)\n if a % 2 == 0:\n print('true')\n return True\n else:\n print('false')\n return False\n\n\n<mask token>\n", "step-3": "def check_integer(a):\n if type(a) != int:\n print('please input an integer')\n exit()\n\n\ndef is_even(a):\n check_integer(a)\n if a % 2 == 0:\n print('true')\n return True\n else:\n print('false')\n return False\n\n\n<mask token>\n", "step-4": "def check_integer(a):\n if type(a) != int:\n print('please input an integer')\n exit()\n\n\ndef is_even(a):\n check_integer(a)\n if a % 2 == 0:\n print('true')\n return True\n else:\n print('false')\n return False\n\n\nis_even(2)\nis_even(3)\nis_even('cat')\n", "step-5": "\n\ndef check_integer(a):\n if type(a) != int:\n print(\"please input an integer\")\n exit()\n\n\ndef is_even(a):\n check_integer(a)\n if a % 2 == 0:\n print(\"true\")\n return True\n else:\n print(\"false\")\n return False\n\n\nis_even(2)\nis_even(3)\nis_even(\"cat\")\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*- import scrapy import re class LeedsAcUkSpider(scrapy.Spider): name = 'leeds_ac_uk' allowed_domains = ['webprod3.leeds.ac.uk'] start_urls = ['http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105'] def parse(self, response): item = {} item['Subject'] = response.css('div#module-programmes h2::text').get().split()[-1] item['Subject short'] = response.css('div#module-programmes h2::text').get().split()[0].split('3')[0] item['Subject code1'] = response.css('div#module-programmes h2::text').get().split()[0] item['Topic'] = response.css('div#module-programmes h2::text').get().split('\n')[-1] Syllabus = response.css('div#module-programmes')
normal
{ "blob_id": "fb4a95197882cc6fe72a5f3c2420a474d9cd97aa", "index": 7751, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass LeedsAcUkSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get(\n ).split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text'\n ).get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text'\n ).get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get(\n ).split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')\n", "step-3": "<mask token>\n\n\nclass LeedsAcUkSpider(scrapy.Spider):\n name = 'leeds_ac_uk'\n allowed_domains = ['webprod3.leeds.ac.uk']\n start_urls = [\n 'http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105'\n ]\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get(\n ).split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text'\n ).get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text'\n ).get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get(\n ).split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')\n", "step-4": "import scrapy\nimport re\n\n\nclass LeedsAcUkSpider(scrapy.Spider):\n name = 'leeds_ac_uk'\n allowed_domains = ['webprod3.leeds.ac.uk']\n start_urls = [\n 'http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105'\n ]\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get(\n ).split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text'\n ).get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text'\n ).get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get(\n ).split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')\n", "step-5": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nclass LeedsAcUkSpider(scrapy.Spider):\n name = 'leeds_ac_uk'\n allowed_domains = ['webprod3.leeds.ac.uk']\n start_urls = ['http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105']\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get().split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text').get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text').get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get().split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
from django.apps import AppConfig from django.utils.translation import gettext_lazy as _ class SearchConfig(AppConfig): name = 'search' verbose_name = _("Search")
normal
{ "blob_id": "f47e4d6ff079b6ac2320467d87b34ae82face032", "index": 4506, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass SearchConfig(AppConfig):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass SearchConfig(AppConfig):\n name = 'search'\n verbose_name = _('Search')\n", "step-4": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass SearchConfig(AppConfig):\n name = 'search'\n verbose_name = _('Search')\n", "step-5": "from django.apps import AppConfig\r\nfrom django.utils.translation import gettext_lazy as _\r\n\r\nclass SearchConfig(AppConfig):\r\n name = 'search'\r\n verbose_name = _(\"Search\")\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import tensorflow as tf from tensorflow.keras import Model from tensorflow.keras.layers import Dense, Flatten, Conv2D, BatchNormalization, LeakyReLU, Reshape, Conv2DTranspose import tensorflow_hub as hub from collections import Counter import numpy as np import sys sys.path.append('../data') from imageio import imwrite import os import argparse from preprocessing import * # this time, katherine is here T_TTTT # Killing optional CPU driver warnings os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' gpu_available = tf.test.is_gpu_available() print("GPU Available: ", gpu_available) performance_dict = {} parser = argparse.ArgumentParser(description='DCGAN') parser.add_argument('--img-dir', type=str, default='./data/celebA', help='Data where training images live') parser.add_argument('--out-dir', type=str, default='./output', help='Data where sampled output images will be written') parser.add_argument('--mode', type=str, default='train', help='Can be "train" or "test"') parser.add_argument('--restore-checkpoint', action='store_true', help='Use this flag if you want to resuming training from a previously-saved checkpoint') parser.add_argument('--z-dim', type=int, default=100, help='Dimensionality of the latent space') parser.add_argument('--batch-size', type=int, default=128, help='Sizes of image batches fed through the network') parser.add_argument('--num-data-threads', type=int, default=2, help='Number of threads to use when loading & pre-processing training images') parser.add_argument('--num-epochs', type=int, default=10, help='Number of passes through the training data to make before stopping') parser.add_argument('--learn-rate', type=float, default=0.0002, help='Learning rate for Adam optimizer') parser.add_argument('--beta1', type=float, default=0.5, help='"beta1" parameter for Adam optimizer') parser.add_argument('--num-gen-updates', type=int, default=2, help='Number of generator updates per discriminator update') parser.add_argument('--log-every', type=int, default=7, help='Print losses after every [this many] training iterations') parser.add_argument('--save-every', type=int, default=500, help='Save the state of the network after every [this many] training iterations') parser.add_argument('--device', type=str, default='GPU:0' if gpu_available else 'CPU:0', help='specific the device of computation eg. CPU:0, GPU:0, GPU:1, GPU:2, ... 
') args = parser.parse_args() class DeepFont(tf.keras.Model): def __init__(self): super(DeepFont, self).__init__() self.batch_size = 128 self.model = tf.keras.Sequential() self.model.add(tf.keras.layers.Reshape((96, 96, 1))) self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=64, strides=(2,2), kernel_size=(3,3), padding='same', name='conv_layer1', input_shape=(96, 96,1))) self.model.add(tf.keras.layers.BatchNormalization()) self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same')) self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=128, strides=(1,1), kernel_size=(3,3), padding='same', name='conv_layer2')) self.model.add(tf.keras.layers.BatchNormalization()) self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same')) self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same')) self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same')) self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same')) self.model.add(tf.keras.layers.Flatten()) self.model.add(tf.keras.layers.Dense(512, activation='relu')) self.model.add(tf.keras.layers.Dense(512, activation='relu')) self.model.add(tf.keras.layers.Dense(150, activation='softmax')) self.optimizer = tf.keras.optimizers.Adam(learning_rate = 0.01) def call(self, inputs): """ input: batch of preprocessed 96x96 images output: probabilities for each batch image and its classification distribution Runs the model on a batch of inputs. """ return self.model(inputs) def loss_function(self, probs, labels): """ input: probs - probabilities generated by the model labels - true labels for every imag output: return loss of the batch being processed Uses sparse categorical crossentropy loss. """ loss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs) return tf.reduce_mean(loss) def total_accuracy(self, probs, labels): """ input: probs - batch of probs (batch size x 150) labels - batch of true labels for images(batch size x 150) output: the accuracy of the model (+1 if correct label) over a batch """ acc = 0 top_five = np.argsort(probs, axis = 1) # 256 x 150 top_five = np.array(top_five).reshape((self.batch_size, 150)) top_five = top_five[:, -1:] # 5 x 150 for i in range (len(labels)): if labels[i] not in performance_dict: performance_dict[labels[i]] = 0 if labels[i] in top_five[i]: acc += 1 performance_dict[labels[i]] += 1 else: performance_dict[labels[i]] -= 1 return (acc / float(self.batch_size)) def get_top_five(self, predictions): """ input: predictions - prbs generated by the model output: array of top 5 font families that the model thinks the image belongs to Runs the model on a batch of inputs. """ predictions = np.sum(predictions, axis = 0) # sums the columns of the logits shape is (150,) top_five = np.argsort(predictions, axis = 0) top_five = np.array(top_five) top_five = top_five[-5:] with open('150_fonts_backwards.json') as json_file: font_subset = json.load(json_file) top_five_fonts = [] for num in top_five: top_five_fonts.append(font_subset[str(num)]) return top_five_fonts def train(model, train_inputs, train_labels): """ input: train_inputs - batch of training images train_labels - batch of training labels output: none Trains the model for a certain number of batches. 
""" average_loss = 0 num_batches = len(train_inputs)//model.batch_size for i in range(num_batches): with tf.GradientTape() as tape: temp_inputs = train_inputs[i*model.batch_size:(i+1)*model.batch_size] temp_train_labels = train_labels[i*model.batch_size:(i+1)*model.batch_size] predictions = model.call(temp_inputs) loss = model.loss_function(predictions, temp_train_labels) average_loss += loss if i % 1000 == 0: print("---Batch", i, " Loss: ", loss) gradients = tape.gradient(loss, model.trainable_variables) model.optimizer.apply_gradients(zip(gradients, model.trainable_variables)) print("****AVERAGE LOSS: ", average_loss / float(num_batches)) def test(model, test_inputs, test_labels): """ input: test_inputs - batch of testing images test_labels - batch of testing labels output: accuracy across the entire set of batches Tests the training inputs against the model's prediction of what font class it thinks each training image belongs to. """ num_batches = len(test_inputs) // (model.batch_size) acc = 0 for i in range(num_batches): batch_inputs = test_inputs[i * model.batch_size: (i+1) * model.batch_size] batch_labels = test_labels[i * model.batch_size: (i+1) * model.batch_size] batch_inputs = np.array(batch_inputs) batch_labels = np.array(batch_labels) predictions = model.call(batch_inputs) # prediction for a single image batch_accuracy = model.total_accuracy(predictions, batch_labels) if i % 100 == 0: print("batch accuracy", batch_accuracy) acc += batch_accuracy average_accuracy = acc / float(num_batches) return average_accuracy def test_single_img(model, image_path): """ input: image_path - the image path of whatever image file you would like to test output: none Prints the top 5 fonts the model predicts for a particular image. """ crops = [] image = alter_image(image_path) image = resize_image(image, 96) cropped_images = generate_crop(image, 96, 10) for c in cropped_images: crops.append(c) predictions = model.call(crops) # prediction for a single image print(predictions.shape) top_5 = model.get_top_five(predictions) print(top_5) ## -------------------------------------------------------------------------------------- def main(): model = DeepFont() model.load_weights('weights_leaky_relu.h5', by_name=True) # For saving/loading models checkpoint_dir = './checkpoints_df' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = tf.train.Checkpoint(model = model) manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3) # Ensure the output directory exists if not os.path.exists(args.out_dir): os.makedirs(args.out_dir) if args.restore_checkpoint or args.mode == 'test' or args.mode == 'single_img': # restores the lates checkpoint using from the manager print("Running test mode...") checkpoint.restore(manager.latest_checkpoint) try: # Specify an invalid GPU device with tf.device('/device:' + args.device): if args.mode == 'train': train_inputs, train_labels = get_train_df('./shuffled_train_inputs.hdf5', './shuffled_train_labels.hdf5') for epoch in range(0, args.num_epochs): print('========================== EPOCH %d ==========================' % epoch) train(model, train_inputs, train_labels) # Save at the end of the epoch, too print("**** SAVING CHECKPOINT AT END OF EPOCH ****") manager.save() if args.mode == 'test': test_inputs, test_labels = get_test_df("./combined_test_inputs.hdf5", "./combined_test_labels.hdf5") print("--test accuracy--", test(model, test_inputs, test_labels)) if args.mode == "single_img": test_single_img(model, './0.png') except RuntimeError as 
e: print(e) if __name__ == '__main__': main()
normal
{ "blob_id": "919239391c6f74d0d8627d3b851beb374eb11d25", "index": 4785, "step-1": "<mask token>\n\n\nclass DeepFont(tf.keras.Model):\n\n def __init__(self):\n super(DeepFont, self).__init__()\n self.batch_size = 128\n self.model = tf.keras.Sequential()\n self.model.add(tf.keras.layers.Reshape((96, 96, 1)))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=64,\n strides=(2, 2), kernel_size=(3, 3), padding='same', name=\n 'conv_layer1', input_shape=(96, 96, 1)))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=128,\n strides=(1, 1), kernel_size=(3, 3), padding='same', name=\n 'conv_layer2'))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Flatten())\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(150, activation='softmax'))\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n\n def call(self, inputs):\n \"\"\" input: batch of preprocessed 96x96 images\n\t\t\toutput: probabilities for each batch image and its classification distribution\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n return self.model(inputs)\n\n def loss_function(self, probs, labels):\n \"\"\" input: probs - probabilities generated by the model\n\t\t\t\t labels - true labels for every imag\n\t\t\toutput: return loss of the batch being processed\n\n\t\t\tUses sparse categorical crossentropy loss.\n\t\t\"\"\"\n loss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs)\n return tf.reduce_mean(loss)\n\n def total_accuracy(self, probs, labels):\n \"\"\" input: probs - batch of probs (batch size x 150)\n\t\t\t\t\t labels - batch of true labels for images(batch size x 150)\n\t\t\toutput: the accuracy of the model (+1 if correct label) over a batch\n\t\t\"\"\"\n acc = 0\n top_five = np.argsort(probs, axis=1)\n top_five = np.array(top_five).reshape((self.batch_size, 150))\n top_five = top_five[:, -1:]\n for i in range(len(labels)):\n if labels[i] not in performance_dict:\n performance_dict[labels[i]] = 0\n if labels[i] in top_five[i]:\n acc += 1\n performance_dict[labels[i]] += 1\n else:\n performance_dict[labels[i]] -= 1\n return acc / float(self.batch_size)\n\n def get_top_five(self, predictions):\n \"\"\" input: predictions - prbs generated by the model\n\t\t\toutput: array of top 5 font families that the model thinks the image belongs to\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n predictions = np.sum(predictions, axis=0)\n top_five = np.argsort(predictions, axis=0)\n top_five = np.array(top_five)\n top_five = top_five[-5:]\n with open('150_fonts_backwards.json') as json_file:\n font_subset = json.load(json_file)\n top_five_fonts = []\n for num in top_five:\n top_five_fonts.append(font_subset[str(num)])\n return top_five_fonts\n\n\ndef train(model, train_inputs, train_labels):\n \"\"\" input: train_inputs - batch of training images\n\t\t\t train_labels 
- batch of training labels\n\t\toutput: none\n\n\t\tTrains the model for a certain number of batches.\n\t\"\"\"\n average_loss = 0\n num_batches = len(train_inputs) // model.batch_size\n for i in range(num_batches):\n with tf.GradientTape() as tape:\n temp_inputs = train_inputs[i * model.batch_size:(i + 1) * model\n .batch_size]\n temp_train_labels = train_labels[i * model.batch_size:(i + 1) *\n model.batch_size]\n predictions = model.call(temp_inputs)\n loss = model.loss_function(predictions, temp_train_labels)\n average_loss += loss\n if i % 1000 == 0:\n print('---Batch', i, ' Loss: ', loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n model.optimizer.apply_gradients(zip(gradients, model.\n trainable_variables))\n print('****AVERAGE LOSS: ', average_loss / float(num_batches))\n\n\ndef test(model, test_inputs, test_labels):\n \"\"\" input: test_inputs - batch of testing images\n\t\t\t test_labels - batch of testing labels\n\t\toutput: accuracy across the entire set of batches\n\n\t\tTests the training inputs against the model's prediction of what font class it thinks each training image\n\t\tbelongs to.\n\t\"\"\"\n num_batches = len(test_inputs) // model.batch_size\n acc = 0\n for i in range(num_batches):\n batch_inputs = test_inputs[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_labels = test_labels[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_inputs = np.array(batch_inputs)\n batch_labels = np.array(batch_labels)\n predictions = model.call(batch_inputs)\n batch_accuracy = model.total_accuracy(predictions, batch_labels)\n if i % 100 == 0:\n print('batch accuracy', batch_accuracy)\n acc += batch_accuracy\n average_accuracy = acc / float(num_batches)\n return average_accuracy\n\n\ndef test_single_img(model, image_path):\n \"\"\" input: image_path - the image path of whatever image file you would like to test\n\t\toutput: none\n\n\t\tPrints the top 5 fonts the model predicts for a particular image.\n\t\"\"\"\n crops = []\n image = alter_image(image_path)\n image = resize_image(image, 96)\n cropped_images = generate_crop(image, 96, 10)\n for c in cropped_images:\n crops.append(c)\n predictions = model.call(crops)\n print(predictions.shape)\n top_5 = model.get_top_five(predictions)\n print(top_5)\n\n\ndef main():\n model = DeepFont()\n model.load_weights('weights_leaky_relu.h5', by_name=True)\n checkpoint_dir = './checkpoints_df'\n checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\n checkpoint = tf.train.Checkpoint(model=model)\n manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir,\n max_to_keep=3)\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n if (args.restore_checkpoint or args.mode == 'test' or args.mode ==\n 'single_img'):\n print('Running test mode...')\n checkpoint.restore(manager.latest_checkpoint)\n try:\n with tf.device('/device:' + args.device):\n if args.mode == 'train':\n train_inputs, train_labels = get_train_df(\n './shuffled_train_inputs.hdf5',\n './shuffled_train_labels.hdf5')\n for epoch in range(0, args.num_epochs):\n print(\n '========================== EPOCH %d =========================='\n % epoch)\n train(model, train_inputs, train_labels)\n print('**** SAVING CHECKPOINT AT END OF EPOCH ****')\n manager.save()\n if args.mode == 'test':\n test_inputs, test_labels = get_test_df(\n './combined_test_inputs.hdf5',\n './combined_test_labels.hdf5')\n print('--test accuracy--', test(model, test_inputs,\n test_labels))\n if args.mode == 'single_img':\n test_single_img(model, './0.png')\n except 
RuntimeError as e:\n print(e)\n\n\n<mask token>\n", "step-2": "<mask token>\nsys.path.append('../data')\n<mask token>\nprint('GPU Available: ', gpu_available)\n<mask token>\nparser.add_argument('--img-dir', type=str, default='./data/celebA', help=\n 'Data where training images live')\nparser.add_argument('--out-dir', type=str, default='./output', help=\n 'Data where sampled output images will be written')\nparser.add_argument('--mode', type=str, default='train', help=\n 'Can be \"train\" or \"test\"')\nparser.add_argument('--restore-checkpoint', action='store_true', help=\n 'Use this flag if you want to resuming training from a previously-saved checkpoint'\n )\nparser.add_argument('--z-dim', type=int, default=100, help=\n 'Dimensionality of the latent space')\nparser.add_argument('--batch-size', type=int, default=128, help=\n 'Sizes of image batches fed through the network')\nparser.add_argument('--num-data-threads', type=int, default=2, help=\n 'Number of threads to use when loading & pre-processing training images')\nparser.add_argument('--num-epochs', type=int, default=10, help=\n 'Number of passes through the training data to make before stopping')\nparser.add_argument('--learn-rate', type=float, default=0.0002, help=\n 'Learning rate for Adam optimizer')\nparser.add_argument('--beta1', type=float, default=0.5, help=\n '\"beta1\" parameter for Adam optimizer')\nparser.add_argument('--num-gen-updates', type=int, default=2, help=\n 'Number of generator updates per discriminator update')\nparser.add_argument('--log-every', type=int, default=7, help=\n 'Print losses after every [this many] training iterations')\nparser.add_argument('--save-every', type=int, default=500, help=\n 'Save the state of the network after every [this many] training iterations'\n )\nparser.add_argument('--device', type=str, default='GPU:0' if gpu_available else\n 'CPU:0', help=\n 'specific the device of computation eg. CPU:0, GPU:0, GPU:1, GPU:2, ... 
')\n<mask token>\n\n\nclass DeepFont(tf.keras.Model):\n\n def __init__(self):\n super(DeepFont, self).__init__()\n self.batch_size = 128\n self.model = tf.keras.Sequential()\n self.model.add(tf.keras.layers.Reshape((96, 96, 1)))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=64,\n strides=(2, 2), kernel_size=(3, 3), padding='same', name=\n 'conv_layer1', input_shape=(96, 96, 1)))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=128,\n strides=(1, 1), kernel_size=(3, 3), padding='same', name=\n 'conv_layer2'))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Flatten())\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(150, activation='softmax'))\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n\n def call(self, inputs):\n \"\"\" input: batch of preprocessed 96x96 images\n\t\t\toutput: probabilities for each batch image and its classification distribution\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n return self.model(inputs)\n\n def loss_function(self, probs, labels):\n \"\"\" input: probs - probabilities generated by the model\n\t\t\t\t labels - true labels for every imag\n\t\t\toutput: return loss of the batch being processed\n\n\t\t\tUses sparse categorical crossentropy loss.\n\t\t\"\"\"\n loss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs)\n return tf.reduce_mean(loss)\n\n def total_accuracy(self, probs, labels):\n \"\"\" input: probs - batch of probs (batch size x 150)\n\t\t\t\t\t labels - batch of true labels for images(batch size x 150)\n\t\t\toutput: the accuracy of the model (+1 if correct label) over a batch\n\t\t\"\"\"\n acc = 0\n top_five = np.argsort(probs, axis=1)\n top_five = np.array(top_five).reshape((self.batch_size, 150))\n top_five = top_five[:, -1:]\n for i in range(len(labels)):\n if labels[i] not in performance_dict:\n performance_dict[labels[i]] = 0\n if labels[i] in top_five[i]:\n acc += 1\n performance_dict[labels[i]] += 1\n else:\n performance_dict[labels[i]] -= 1\n return acc / float(self.batch_size)\n\n def get_top_five(self, predictions):\n \"\"\" input: predictions - prbs generated by the model\n\t\t\toutput: array of top 5 font families that the model thinks the image belongs to\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n predictions = np.sum(predictions, axis=0)\n top_five = np.argsort(predictions, axis=0)\n top_five = np.array(top_five)\n top_five = top_five[-5:]\n with open('150_fonts_backwards.json') as json_file:\n font_subset = json.load(json_file)\n top_five_fonts = []\n for num in top_five:\n top_five_fonts.append(font_subset[str(num)])\n return top_five_fonts\n\n\ndef train(model, train_inputs, train_labels):\n \"\"\" input: train_inputs - batch of training images\n\t\t\t train_labels - batch of training labels\n\t\toutput: none\n\n\t\tTrains the model for a 
certain number of batches.\n\t\"\"\"\n average_loss = 0\n num_batches = len(train_inputs) // model.batch_size\n for i in range(num_batches):\n with tf.GradientTape() as tape:\n temp_inputs = train_inputs[i * model.batch_size:(i + 1) * model\n .batch_size]\n temp_train_labels = train_labels[i * model.batch_size:(i + 1) *\n model.batch_size]\n predictions = model.call(temp_inputs)\n loss = model.loss_function(predictions, temp_train_labels)\n average_loss += loss\n if i % 1000 == 0:\n print('---Batch', i, ' Loss: ', loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n model.optimizer.apply_gradients(zip(gradients, model.\n trainable_variables))\n print('****AVERAGE LOSS: ', average_loss / float(num_batches))\n\n\ndef test(model, test_inputs, test_labels):\n \"\"\" input: test_inputs - batch of testing images\n\t\t\t test_labels - batch of testing labels\n\t\toutput: accuracy across the entire set of batches\n\n\t\tTests the training inputs against the model's prediction of what font class it thinks each training image\n\t\tbelongs to.\n\t\"\"\"\n num_batches = len(test_inputs) // model.batch_size\n acc = 0\n for i in range(num_batches):\n batch_inputs = test_inputs[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_labels = test_labels[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_inputs = np.array(batch_inputs)\n batch_labels = np.array(batch_labels)\n predictions = model.call(batch_inputs)\n batch_accuracy = model.total_accuracy(predictions, batch_labels)\n if i % 100 == 0:\n print('batch accuracy', batch_accuracy)\n acc += batch_accuracy\n average_accuracy = acc / float(num_batches)\n return average_accuracy\n\n\ndef test_single_img(model, image_path):\n \"\"\" input: image_path - the image path of whatever image file you would like to test\n\t\toutput: none\n\n\t\tPrints the top 5 fonts the model predicts for a particular image.\n\t\"\"\"\n crops = []\n image = alter_image(image_path)\n image = resize_image(image, 96)\n cropped_images = generate_crop(image, 96, 10)\n for c in cropped_images:\n crops.append(c)\n predictions = model.call(crops)\n print(predictions.shape)\n top_5 = model.get_top_five(predictions)\n print(top_5)\n\n\ndef main():\n model = DeepFont()\n model.load_weights('weights_leaky_relu.h5', by_name=True)\n checkpoint_dir = './checkpoints_df'\n checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\n checkpoint = tf.train.Checkpoint(model=model)\n manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir,\n max_to_keep=3)\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n if (args.restore_checkpoint or args.mode == 'test' or args.mode ==\n 'single_img'):\n print('Running test mode...')\n checkpoint.restore(manager.latest_checkpoint)\n try:\n with tf.device('/device:' + args.device):\n if args.mode == 'train':\n train_inputs, train_labels = get_train_df(\n './shuffled_train_inputs.hdf5',\n './shuffled_train_labels.hdf5')\n for epoch in range(0, args.num_epochs):\n print(\n '========================== EPOCH %d =========================='\n % epoch)\n train(model, train_inputs, train_labels)\n print('**** SAVING CHECKPOINT AT END OF EPOCH ****')\n manager.save()\n if args.mode == 'test':\n test_inputs, test_labels = get_test_df(\n './combined_test_inputs.hdf5',\n './combined_test_labels.hdf5')\n print('--test accuracy--', test(model, test_inputs,\n test_labels))\n if args.mode == 'single_img':\n test_single_img(model, './0.png')\n except RuntimeError as e:\n print(e)\n\n\nif __name__ == '__main__':\n main()\n", 
"step-3": "<mask token>\nsys.path.append('../data')\n<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\ngpu_available = tf.test.is_gpu_available()\nprint('GPU Available: ', gpu_available)\nperformance_dict = {}\nparser = argparse.ArgumentParser(description='DCGAN')\nparser.add_argument('--img-dir', type=str, default='./data/celebA', help=\n 'Data where training images live')\nparser.add_argument('--out-dir', type=str, default='./output', help=\n 'Data where sampled output images will be written')\nparser.add_argument('--mode', type=str, default='train', help=\n 'Can be \"train\" or \"test\"')\nparser.add_argument('--restore-checkpoint', action='store_true', help=\n 'Use this flag if you want to resuming training from a previously-saved checkpoint'\n )\nparser.add_argument('--z-dim', type=int, default=100, help=\n 'Dimensionality of the latent space')\nparser.add_argument('--batch-size', type=int, default=128, help=\n 'Sizes of image batches fed through the network')\nparser.add_argument('--num-data-threads', type=int, default=2, help=\n 'Number of threads to use when loading & pre-processing training images')\nparser.add_argument('--num-epochs', type=int, default=10, help=\n 'Number of passes through the training data to make before stopping')\nparser.add_argument('--learn-rate', type=float, default=0.0002, help=\n 'Learning rate for Adam optimizer')\nparser.add_argument('--beta1', type=float, default=0.5, help=\n '\"beta1\" parameter for Adam optimizer')\nparser.add_argument('--num-gen-updates', type=int, default=2, help=\n 'Number of generator updates per discriminator update')\nparser.add_argument('--log-every', type=int, default=7, help=\n 'Print losses after every [this many] training iterations')\nparser.add_argument('--save-every', type=int, default=500, help=\n 'Save the state of the network after every [this many] training iterations'\n )\nparser.add_argument('--device', type=str, default='GPU:0' if gpu_available else\n 'CPU:0', help=\n 'specific the device of computation eg. CPU:0, GPU:0, GPU:1, GPU:2, ... 
')\nargs = parser.parse_args()\n\n\nclass DeepFont(tf.keras.Model):\n\n def __init__(self):\n super(DeepFont, self).__init__()\n self.batch_size = 128\n self.model = tf.keras.Sequential()\n self.model.add(tf.keras.layers.Reshape((96, 96, 1)))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=64,\n strides=(2, 2), kernel_size=(3, 3), padding='same', name=\n 'conv_layer1', input_shape=(96, 96, 1)))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=128,\n strides=(1, 1), kernel_size=(3, 3), padding='same', name=\n 'conv_layer2'))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Flatten())\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(150, activation='softmax'))\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n\n def call(self, inputs):\n \"\"\" input: batch of preprocessed 96x96 images\n\t\t\toutput: probabilities for each batch image and its classification distribution\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n return self.model(inputs)\n\n def loss_function(self, probs, labels):\n \"\"\" input: probs - probabilities generated by the model\n\t\t\t\t labels - true labels for every imag\n\t\t\toutput: return loss of the batch being processed\n\n\t\t\tUses sparse categorical crossentropy loss.\n\t\t\"\"\"\n loss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs)\n return tf.reduce_mean(loss)\n\n def total_accuracy(self, probs, labels):\n \"\"\" input: probs - batch of probs (batch size x 150)\n\t\t\t\t\t labels - batch of true labels for images(batch size x 150)\n\t\t\toutput: the accuracy of the model (+1 if correct label) over a batch\n\t\t\"\"\"\n acc = 0\n top_five = np.argsort(probs, axis=1)\n top_five = np.array(top_five).reshape((self.batch_size, 150))\n top_five = top_five[:, -1:]\n for i in range(len(labels)):\n if labels[i] not in performance_dict:\n performance_dict[labels[i]] = 0\n if labels[i] in top_five[i]:\n acc += 1\n performance_dict[labels[i]] += 1\n else:\n performance_dict[labels[i]] -= 1\n return acc / float(self.batch_size)\n\n def get_top_five(self, predictions):\n \"\"\" input: predictions - prbs generated by the model\n\t\t\toutput: array of top 5 font families that the model thinks the image belongs to\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n predictions = np.sum(predictions, axis=0)\n top_five = np.argsort(predictions, axis=0)\n top_five = np.array(top_five)\n top_five = top_five[-5:]\n with open('150_fonts_backwards.json') as json_file:\n font_subset = json.load(json_file)\n top_five_fonts = []\n for num in top_five:\n top_five_fonts.append(font_subset[str(num)])\n return top_five_fonts\n\n\ndef train(model, train_inputs, train_labels):\n \"\"\" input: train_inputs - batch of training images\n\t\t\t train_labels - batch of training labels\n\t\toutput: none\n\n\t\tTrains the 
model for a certain number of batches.\n\t\"\"\"\n average_loss = 0\n num_batches = len(train_inputs) // model.batch_size\n for i in range(num_batches):\n with tf.GradientTape() as tape:\n temp_inputs = train_inputs[i * model.batch_size:(i + 1) * model\n .batch_size]\n temp_train_labels = train_labels[i * model.batch_size:(i + 1) *\n model.batch_size]\n predictions = model.call(temp_inputs)\n loss = model.loss_function(predictions, temp_train_labels)\n average_loss += loss\n if i % 1000 == 0:\n print('---Batch', i, ' Loss: ', loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n model.optimizer.apply_gradients(zip(gradients, model.\n trainable_variables))\n print('****AVERAGE LOSS: ', average_loss / float(num_batches))\n\n\ndef test(model, test_inputs, test_labels):\n \"\"\" input: test_inputs - batch of testing images\n\t\t\t test_labels - batch of testing labels\n\t\toutput: accuracy across the entire set of batches\n\n\t\tTests the training inputs against the model's prediction of what font class it thinks each training image\n\t\tbelongs to.\n\t\"\"\"\n num_batches = len(test_inputs) // model.batch_size\n acc = 0\n for i in range(num_batches):\n batch_inputs = test_inputs[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_labels = test_labels[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_inputs = np.array(batch_inputs)\n batch_labels = np.array(batch_labels)\n predictions = model.call(batch_inputs)\n batch_accuracy = model.total_accuracy(predictions, batch_labels)\n if i % 100 == 0:\n print('batch accuracy', batch_accuracy)\n acc += batch_accuracy\n average_accuracy = acc / float(num_batches)\n return average_accuracy\n\n\ndef test_single_img(model, image_path):\n \"\"\" input: image_path - the image path of whatever image file you would like to test\n\t\toutput: none\n\n\t\tPrints the top 5 fonts the model predicts for a particular image.\n\t\"\"\"\n crops = []\n image = alter_image(image_path)\n image = resize_image(image, 96)\n cropped_images = generate_crop(image, 96, 10)\n for c in cropped_images:\n crops.append(c)\n predictions = model.call(crops)\n print(predictions.shape)\n top_5 = model.get_top_five(predictions)\n print(top_5)\n\n\ndef main():\n model = DeepFont()\n model.load_weights('weights_leaky_relu.h5', by_name=True)\n checkpoint_dir = './checkpoints_df'\n checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\n checkpoint = tf.train.Checkpoint(model=model)\n manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir,\n max_to_keep=3)\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n if (args.restore_checkpoint or args.mode == 'test' or args.mode ==\n 'single_img'):\n print('Running test mode...')\n checkpoint.restore(manager.latest_checkpoint)\n try:\n with tf.device('/device:' + args.device):\n if args.mode == 'train':\n train_inputs, train_labels = get_train_df(\n './shuffled_train_inputs.hdf5',\n './shuffled_train_labels.hdf5')\n for epoch in range(0, args.num_epochs):\n print(\n '========================== EPOCH %d =========================='\n % epoch)\n train(model, train_inputs, train_labels)\n print('**** SAVING CHECKPOINT AT END OF EPOCH ****')\n manager.save()\n if args.mode == 'test':\n test_inputs, test_labels = get_test_df(\n './combined_test_inputs.hdf5',\n './combined_test_labels.hdf5')\n print('--test accuracy--', test(model, test_inputs,\n test_labels))\n if args.mode == 'single_img':\n test_single_img(model, './0.png')\n except RuntimeError as e:\n print(e)\n\n\nif __name__ == 
'__main__':\n main()\n", "step-4": "import tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, BatchNormalization, LeakyReLU, Reshape, Conv2DTranspose\nimport tensorflow_hub as hub\nfrom collections import Counter\nimport numpy as np\nimport sys\nsys.path.append('../data')\nfrom imageio import imwrite\nimport os\nimport argparse\nfrom preprocessing import *\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\ngpu_available = tf.test.is_gpu_available()\nprint('GPU Available: ', gpu_available)\nperformance_dict = {}\nparser = argparse.ArgumentParser(description='DCGAN')\nparser.add_argument('--img-dir', type=str, default='./data/celebA', help=\n 'Data where training images live')\nparser.add_argument('--out-dir', type=str, default='./output', help=\n 'Data where sampled output images will be written')\nparser.add_argument('--mode', type=str, default='train', help=\n 'Can be \"train\" or \"test\"')\nparser.add_argument('--restore-checkpoint', action='store_true', help=\n 'Use this flag if you want to resuming training from a previously-saved checkpoint'\n )\nparser.add_argument('--z-dim', type=int, default=100, help=\n 'Dimensionality of the latent space')\nparser.add_argument('--batch-size', type=int, default=128, help=\n 'Sizes of image batches fed through the network')\nparser.add_argument('--num-data-threads', type=int, default=2, help=\n 'Number of threads to use when loading & pre-processing training images')\nparser.add_argument('--num-epochs', type=int, default=10, help=\n 'Number of passes through the training data to make before stopping')\nparser.add_argument('--learn-rate', type=float, default=0.0002, help=\n 'Learning rate for Adam optimizer')\nparser.add_argument('--beta1', type=float, default=0.5, help=\n '\"beta1\" parameter for Adam optimizer')\nparser.add_argument('--num-gen-updates', type=int, default=2, help=\n 'Number of generator updates per discriminator update')\nparser.add_argument('--log-every', type=int, default=7, help=\n 'Print losses after every [this many] training iterations')\nparser.add_argument('--save-every', type=int, default=500, help=\n 'Save the state of the network after every [this many] training iterations'\n )\nparser.add_argument('--device', type=str, default='GPU:0' if gpu_available else\n 'CPU:0', help=\n 'specific the device of computation eg. CPU:0, GPU:0, GPU:1, GPU:2, ... 
')\nargs = parser.parse_args()\n\n\nclass DeepFont(tf.keras.Model):\n\n def __init__(self):\n super(DeepFont, self).__init__()\n self.batch_size = 128\n self.model = tf.keras.Sequential()\n self.model.add(tf.keras.layers.Reshape((96, 96, 1)))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=64,\n strides=(2, 2), kernel_size=(3, 3), padding='same', name=\n 'conv_layer1', input_shape=(96, 96, 1)))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(trainable=False, filters=128,\n strides=(1, 1), kernel_size=(3, 3), padding='same', name=\n 'conv_layer2'))\n self.model.add(tf.keras.layers.BatchNormalization())\n self.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n strides=None, padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3, 3),\n strides=(1, 1), padding='same'))\n self.model.add(tf.keras.layers.Flatten())\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(512, activation='relu'))\n self.model.add(tf.keras.layers.Dense(150, activation='softmax'))\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n\n def call(self, inputs):\n \"\"\" input: batch of preprocessed 96x96 images\n\t\t\toutput: probabilities for each batch image and its classification distribution\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n return self.model(inputs)\n\n def loss_function(self, probs, labels):\n \"\"\" input: probs - probabilities generated by the model\n\t\t\t\t labels - true labels for every imag\n\t\t\toutput: return loss of the batch being processed\n\n\t\t\tUses sparse categorical crossentropy loss.\n\t\t\"\"\"\n loss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs)\n return tf.reduce_mean(loss)\n\n def total_accuracy(self, probs, labels):\n \"\"\" input: probs - batch of probs (batch size x 150)\n\t\t\t\t\t labels - batch of true labels for images(batch size x 150)\n\t\t\toutput: the accuracy of the model (+1 if correct label) over a batch\n\t\t\"\"\"\n acc = 0\n top_five = np.argsort(probs, axis=1)\n top_five = np.array(top_five).reshape((self.batch_size, 150))\n top_five = top_five[:, -1:]\n for i in range(len(labels)):\n if labels[i] not in performance_dict:\n performance_dict[labels[i]] = 0\n if labels[i] in top_five[i]:\n acc += 1\n performance_dict[labels[i]] += 1\n else:\n performance_dict[labels[i]] -= 1\n return acc / float(self.batch_size)\n\n def get_top_five(self, predictions):\n \"\"\" input: predictions - prbs generated by the model\n\t\t\toutput: array of top 5 font families that the model thinks the image belongs to\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n predictions = np.sum(predictions, axis=0)\n top_five = np.argsort(predictions, axis=0)\n top_five = np.array(top_five)\n top_five = top_five[-5:]\n with open('150_fonts_backwards.json') as json_file:\n font_subset = json.load(json_file)\n top_five_fonts = []\n for num in top_five:\n top_five_fonts.append(font_subset[str(num)])\n return top_five_fonts\n\n\ndef train(model, train_inputs, train_labels):\n \"\"\" input: train_inputs - batch of training images\n\t\t\t train_labels - batch of training labels\n\t\toutput: none\n\n\t\tTrains the 
model for a certain number of batches.\n\t\"\"\"\n average_loss = 0\n num_batches = len(train_inputs) // model.batch_size\n for i in range(num_batches):\n with tf.GradientTape() as tape:\n temp_inputs = train_inputs[i * model.batch_size:(i + 1) * model\n .batch_size]\n temp_train_labels = train_labels[i * model.batch_size:(i + 1) *\n model.batch_size]\n predictions = model.call(temp_inputs)\n loss = model.loss_function(predictions, temp_train_labels)\n average_loss += loss\n if i % 1000 == 0:\n print('---Batch', i, ' Loss: ', loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n model.optimizer.apply_gradients(zip(gradients, model.\n trainable_variables))\n print('****AVERAGE LOSS: ', average_loss / float(num_batches))\n\n\ndef test(model, test_inputs, test_labels):\n \"\"\" input: test_inputs - batch of testing images\n\t\t\t test_labels - batch of testing labels\n\t\toutput: accuracy across the entire set of batches\n\n\t\tTests the training inputs against the model's prediction of what font class it thinks each training image\n\t\tbelongs to.\n\t\"\"\"\n num_batches = len(test_inputs) // model.batch_size\n acc = 0\n for i in range(num_batches):\n batch_inputs = test_inputs[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_labels = test_labels[i * model.batch_size:(i + 1) * model.\n batch_size]\n batch_inputs = np.array(batch_inputs)\n batch_labels = np.array(batch_labels)\n predictions = model.call(batch_inputs)\n batch_accuracy = model.total_accuracy(predictions, batch_labels)\n if i % 100 == 0:\n print('batch accuracy', batch_accuracy)\n acc += batch_accuracy\n average_accuracy = acc / float(num_batches)\n return average_accuracy\n\n\ndef test_single_img(model, image_path):\n \"\"\" input: image_path - the image path of whatever image file you would like to test\n\t\toutput: none\n\n\t\tPrints the top 5 fonts the model predicts for a particular image.\n\t\"\"\"\n crops = []\n image = alter_image(image_path)\n image = resize_image(image, 96)\n cropped_images = generate_crop(image, 96, 10)\n for c in cropped_images:\n crops.append(c)\n predictions = model.call(crops)\n print(predictions.shape)\n top_5 = model.get_top_five(predictions)\n print(top_5)\n\n\ndef main():\n model = DeepFont()\n model.load_weights('weights_leaky_relu.h5', by_name=True)\n checkpoint_dir = './checkpoints_df'\n checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\n checkpoint = tf.train.Checkpoint(model=model)\n manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir,\n max_to_keep=3)\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n if (args.restore_checkpoint or args.mode == 'test' or args.mode ==\n 'single_img'):\n print('Running test mode...')\n checkpoint.restore(manager.latest_checkpoint)\n try:\n with tf.device('/device:' + args.device):\n if args.mode == 'train':\n train_inputs, train_labels = get_train_df(\n './shuffled_train_inputs.hdf5',\n './shuffled_train_labels.hdf5')\n for epoch in range(0, args.num_epochs):\n print(\n '========================== EPOCH %d =========================='\n % epoch)\n train(model, train_inputs, train_labels)\n print('**** SAVING CHECKPOINT AT END OF EPOCH ****')\n manager.save()\n if args.mode == 'test':\n test_inputs, test_labels = get_test_df(\n './combined_test_inputs.hdf5',\n './combined_test_labels.hdf5')\n print('--test accuracy--', test(model, test_inputs,\n test_labels))\n if args.mode == 'single_img':\n test_single_img(model, './0.png')\n except RuntimeError as e:\n print(e)\n\n\nif __name__ == 
'__main__':\n main()\n", "step-5": "import tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, BatchNormalization, LeakyReLU, Reshape, Conv2DTranspose\nimport tensorflow_hub as hub\nfrom collections import Counter\nimport numpy as np\n\nimport sys\nsys.path.append('../data')\n\nfrom imageio import imwrite\nimport os\nimport argparse\nfrom preprocessing import *\n\n# this time, katherine is here T_TTTT\n\n\n# Killing optional CPU driver warnings\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ngpu_available = tf.test.is_gpu_available()\nprint(\"GPU Available: \", gpu_available)\n\n\nperformance_dict = {}\n\n\nparser = argparse.ArgumentParser(description='DCGAN')\n\nparser.add_argument('--img-dir', type=str, default='./data/celebA',\n\t\t\t\t\thelp='Data where training images live')\n\nparser.add_argument('--out-dir', type=str, default='./output',\n\t\t\t\t\thelp='Data where sampled output images will be written')\n\nparser.add_argument('--mode', type=str, default='train',\n\t\t\t\t\thelp='Can be \"train\" or \"test\"')\n\nparser.add_argument('--restore-checkpoint', action='store_true',\n\t\t\t\t\thelp='Use this flag if you want to resuming training from a previously-saved checkpoint')\n\nparser.add_argument('--z-dim', type=int, default=100,\n\t\t\t\t\thelp='Dimensionality of the latent space')\n\nparser.add_argument('--batch-size', type=int, default=128,\n\t\t\t\t\thelp='Sizes of image batches fed through the network')\n\nparser.add_argument('--num-data-threads', type=int, default=2,\n\t\t\t\t\thelp='Number of threads to use when loading & pre-processing training images')\n\nparser.add_argument('--num-epochs', type=int, default=10,\n\t\t\t\t\thelp='Number of passes through the training data to make before stopping')\n\nparser.add_argument('--learn-rate', type=float, default=0.0002,\n\t\t\t\t\thelp='Learning rate for Adam optimizer')\n\nparser.add_argument('--beta1', type=float, default=0.5,\n\t\t\t\t\thelp='\"beta1\" parameter for Adam optimizer')\n\nparser.add_argument('--num-gen-updates', type=int, default=2,\n\t\t\t\t\thelp='Number of generator updates per discriminator update')\n\nparser.add_argument('--log-every', type=int, default=7,\n\t\t\t\t\thelp='Print losses after every [this many] training iterations')\n\nparser.add_argument('--save-every', type=int, default=500,\n\t\t\t\t\thelp='Save the state of the network after every [this many] training iterations')\n\nparser.add_argument('--device', type=str, default='GPU:0' if gpu_available else 'CPU:0',\n\t\t\t\t\thelp='specific the device of computation eg. CPU:0, GPU:0, GPU:1, GPU:2, ... 
')\n\nargs = parser.parse_args()\n\n\n\nclass DeepFont(tf.keras.Model):\n\tdef __init__(self):\n\t\tsuper(DeepFont, self).__init__()\n\t\tself.batch_size = 128\n\t\tself.model = tf.keras.Sequential()\n\t\tself.model.add(tf.keras.layers.Reshape((96, 96, 1)))\n\t\tself.model.add(tf.keras.layers.Conv2D(trainable=False, filters=64, strides=(2,2), kernel_size=(3,3), padding='same', name='conv_layer1', input_shape=(96, 96,1)))\n\t\tself.model.add(tf.keras.layers.BatchNormalization())\n\t\tself.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))\n\n\t\tself.model.add(tf.keras.layers.Conv2D(trainable=False, filters=128, strides=(1,1), kernel_size=(3,3), padding='same', name='conv_layer2'))\n\t\tself.model.add(tf.keras.layers.BatchNormalization())\n\t\tself.model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=None, padding='same'))\n\n\t\tself.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same'))\n\t\tself.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same'))\n\t\tself.model.add(tf.keras.layers.Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same'))\n\n\t\tself.model.add(tf.keras.layers.Flatten())\n\t\tself.model.add(tf.keras.layers.Dense(512, activation='relu'))\n\t\tself.model.add(tf.keras.layers.Dense(512, activation='relu'))\n\t\tself.model.add(tf.keras.layers.Dense(150, activation='softmax'))\n\n\t\tself.optimizer = tf.keras.optimizers.Adam(learning_rate = 0.01)\n\n\tdef call(self, inputs):\n\t\t\"\"\" input: batch of preprocessed 96x96 images\n\t\t\toutput: probabilities for each batch image and its classification distribution\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n\t\treturn self.model(inputs)\n\n\tdef loss_function(self, probs, labels):\n\t\t\"\"\" input: probs - probabilities generated by the model\n\t\t\t\t labels - true labels for every imag\n\t\t\toutput: return loss of the batch being processed\n\n\t\t\tUses sparse categorical crossentropy loss.\n\t\t\"\"\"\n\t\tloss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs)\n\t\treturn tf.reduce_mean(loss)\n\n\tdef total_accuracy(self, probs, labels):\n\t\t\"\"\" input: probs - batch of probs (batch size x 150)\n\t\t\t\t\t labels - batch of true labels for images(batch size x 150)\n\t\t\toutput: the accuracy of the model (+1 if correct label) over a batch\n\t\t\"\"\"\n\t\tacc = 0\n\n\t\ttop_five = np.argsort(probs, axis = 1) # 256 x 150\n\t\ttop_five = np.array(top_five).reshape((self.batch_size, 150))\n\t\ttop_five = top_five[:, -1:] # 5 x 150\n\n\t\tfor i in range (len(labels)):\n\t\t\tif labels[i] not in performance_dict:\n\t\t\t\tperformance_dict[labels[i]] = 0\n\n\t\t\tif labels[i] in top_five[i]:\n\t\t\t\tacc += 1\n\t\t\t\tperformance_dict[labels[i]] += 1\n\t\t\telse:\n\t\t\t\tperformance_dict[labels[i]] -= 1\n\n\t\treturn (acc / float(self.batch_size))\n\n\tdef get_top_five(self, predictions):\n\t\t\"\"\" input: predictions - prbs generated by the model\n\t\t\toutput: array of top 5 font families that the model thinks the image belongs to\n\n\t\t\tRuns the model on a batch of inputs.\n\t\t\"\"\"\n\t\tpredictions = np.sum(predictions, axis = 0) # sums the columns of the logits shape is (150,)\n\n\t\ttop_five = np.argsort(predictions, axis = 0)\n\t\ttop_five = np.array(top_five)\n\t\ttop_five = top_five[-5:]\n\n\t\twith open('150_fonts_backwards.json') as json_file:\n\t\t\tfont_subset = json.load(json_file)\n\n\t\ttop_five_fonts = []\n\t\tfor num in 
top_five:\n\t\t\ttop_five_fonts.append(font_subset[str(num)])\n\t\treturn top_five_fonts\n\ndef train(model, train_inputs, train_labels):\n\t\"\"\" input: train_inputs - batch of training images\n\t\t\t train_labels - batch of training labels\n\t\toutput: none\n\n\t\tTrains the model for a certain number of batches.\n\t\"\"\"\n\taverage_loss = 0\n\tnum_batches = len(train_inputs)//model.batch_size\n\tfor i in range(num_batches):\n\t\twith tf.GradientTape() as tape:\n\t\t\ttemp_inputs = train_inputs[i*model.batch_size:(i+1)*model.batch_size]\n\t\t\ttemp_train_labels = train_labels[i*model.batch_size:(i+1)*model.batch_size]\n\n\t\t\tpredictions = model.call(temp_inputs)\n\t\t\tloss = model.loss_function(predictions, temp_train_labels)\n\t\t\taverage_loss += loss\n\t\t\tif i % 1000 == 0:\n\t\t\t\tprint(\"---Batch\", i, \" Loss: \", loss)\n\t\tgradients = tape.gradient(loss, model.trainable_variables)\n\t\tmodel.optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\tprint(\"****AVERAGE LOSS: \", average_loss / float(num_batches))\n\n\ndef test(model, test_inputs, test_labels):\n\t\"\"\" input: test_inputs - batch of testing images\n\t\t\t test_labels - batch of testing labels\n\t\toutput: accuracy across the entire set of batches\n\n\t\tTests the training inputs against the model's prediction of what font class it thinks each training image\n\t\tbelongs to.\n\t\"\"\"\n\tnum_batches = len(test_inputs) // (model.batch_size)\n\n\n\tacc = 0\n\tfor i in range(num_batches):\n\t\tbatch_inputs = test_inputs[i * model.batch_size: (i+1) * model.batch_size]\n\t\tbatch_labels = test_labels[i * model.batch_size: (i+1) * model.batch_size]\n\n\t\tbatch_inputs = np.array(batch_inputs)\n\t\tbatch_labels = np.array(batch_labels)\n\n\t\tpredictions = model.call(batch_inputs) # prediction for a single image\n\n\t\tbatch_accuracy = model.total_accuracy(predictions, batch_labels)\n\n\t\tif i % 100 == 0:\n\t\t\tprint(\"batch accuracy\", batch_accuracy)\n\t\tacc += batch_accuracy\n\n\taverage_accuracy = acc / float(num_batches)\n\n\treturn average_accuracy\n\ndef test_single_img(model, image_path):\n\t\"\"\" input: image_path - the image path of whatever image file you would like to test\n\t\toutput: none\n\n\t\tPrints the top 5 fonts the model predicts for a particular image.\n\t\"\"\"\n\tcrops = []\n\n\timage = alter_image(image_path)\n\timage = resize_image(image, 96)\n\tcropped_images = generate_crop(image, 96, 10)\n\n\tfor c in cropped_images:\n\t\tcrops.append(c)\n\n\tpredictions = model.call(crops) # prediction for a single image\n\tprint(predictions.shape)\n\ttop_5 = model.get_top_five(predictions)\n\tprint(top_5)\n\n## --------------------------------------------------------------------------------------\n\ndef main():\n\n\tmodel = DeepFont()\n\tmodel.load_weights('weights_leaky_relu.h5', by_name=True)\n\n\t# For saving/loading models\n\tcheckpoint_dir = './checkpoints_df'\n\tcheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n\tcheckpoint = tf.train.Checkpoint(model = model)\n\tmanager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3)\n\t# Ensure the output directory exists\n\tif not os.path.exists(args.out_dir):\n\t\tos.makedirs(args.out_dir)\n\n\tif args.restore_checkpoint or args.mode == 'test' or args.mode == 'single_img':\n\t\t# restores the lates checkpoint using from the manager\n\t\tprint(\"Running test mode...\")\n\t\tcheckpoint.restore(manager.latest_checkpoint)\n\n\ttry:\n\t\t# Specify an invalid GPU device\n\t\twith tf.device('/device:' + 
args.device):\n\t\t\tif args.mode == 'train':\n\t\t\t\ttrain_inputs, train_labels = get_train_df('./shuffled_train_inputs.hdf5', './shuffled_train_labels.hdf5')\n\t\t\t\tfor epoch in range(0, args.num_epochs):\n\t\t\t\t\tprint('========================== EPOCH %d ==========================' % epoch)\n\t\t\t\t\ttrain(model, train_inputs, train_labels)\n\t\t\t\t\t# Save at the end of the epoch, too\n\t\t\t\t\tprint(\"**** SAVING CHECKPOINT AT END OF EPOCH ****\")\n\t\t\t\t\tmanager.save()\n\t\t\tif args.mode == 'test':\n\t\t\t\ttest_inputs, test_labels = get_test_df(\"./combined_test_inputs.hdf5\", \"./combined_test_labels.hdf5\")\n\t\t\t\tprint(\"--test accuracy--\", test(model, test_inputs, test_labels))\n\t\t\tif args.mode == \"single_img\":\n\t\t\t\ttest_single_img(model, './0.png')\n\texcept RuntimeError as e:\n\t\tprint(e)\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 10, 11, 12, 13, 14 ] }
[ 10, 11, 12, 13, 14 ]
n = int(input('Digite um número inteiro: '))
print(' O dobro de {} é {}'.format(n, n * 2))
print(' O triplo de {} é {}'.format(n, n * 3))
print(' A Raiz quadrada de {} é {}'.format(n, n * n))
normal
{ "blob_id": "c0ad3d642f28cb11a8225d4d011dbb241bd88432", "index": 1661, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(' O dobro de {} é {}'.format(n, n * 2))\nprint(' O triplo de {} é {}'.format(n, n * 3))\nprint(' A Raiz quadrada de {} é {}'.format(n, n * n))\n", "step-3": "n = int(input('Digite um número inteiro: '))\nprint(' O dobro de {} é {}'.format(n, n * 2))\nprint(' O triplo de {} é {}'.format(n, n * 3))\nprint(' A Raiz quadrada de {} é {}'.format(n, n * n))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import weakref
from Qt import QtCore
from Qt import QtGui
from Qt.QtWidgets import QDoubleSpinBox
from Qt.QtWidgets import QSpinBox
from Qt.QtWidgets import QWidget
from Qt.QtWidgets import QSpacerItem
from Qt.QtWidgets import QPushButton
from Qt.QtWidgets import QComboBox
from Qt.QtWidgets import QLineEdit
from Qt.QtWidgets import QCheckBox
from Qt.QtWidgets import QGraphicsProxyWidget
from Qt.QtWidgets import QGridLayout
from Qt.QtWidgets import QHBoxLayout
from Qt.QtWidgets import QSizePolicy
from AGraphCommon import *
from AbstractGraph import PinBase
from ..Ui import FloatVector3InputWidget_ui
from ..Ui import FloatVector4InputWidget_ui
from ..Ui import Matrix33InputWidget_ui
from ..Ui import Matrix44InputWidget_ui
import pyrr


def _configDoubleSpinBox(sb):
    sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
    sb.setSingleStep(FLOAT_SINGLE_STEP)
    sb.setDecimals(FLOAT_DECIMALS)


def _configIntSpinBox(sb):
    sb.setRange(INT_RANGE_MIN, INT_RANGE_MAX)


class InputWidgetRaw(QWidget):
    """
    This type of widget can be used as a base class for complex ui generated by designer
    """

    def __init__(self, parent=None, dataSetCallback=None, defaultValue=None, userStructClass=None, **kwds):
        super(InputWidgetRaw, self).__init__(parent=parent, **kwds)
        self._defaultValue = defaultValue
        # fuction with signature void(object)
        # this will set data to pin
        self.dataSetCallback = dataSetCallback

    def onResetValue(self):
        self.setWidgetValue(self._defaultValue)

    def setWidgetValue(self, value):
        '''to widget'''
        pass

    def widgetValueUpdated(self, value):
        '''from widget'''
        pass


class InputWidgetSingle(InputWidgetRaw):
    """
    This type of widget is used for a simple widgets like buttons, checkboxes etc.
    It consists of horizontal layout widget itself and reset button.
    """

    def __init__(self, parent=None, dataSetCallback=None, defaultValue=None, userStructClass=None, **kwds):
        super(InputWidgetSingle, self).__init__(parent=parent, dataSetCallback=dataSetCallback, defaultValue=defaultValue, userStructClass=userStructClass, **kwds)
        # from widget
        self.bWidgetSet = False
        self.gridLayout = QGridLayout(self)
        self.gridLayout.setSpacing(1)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.horizontalLayout = QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pbReset = QPushButton(self)
        self.pbReset.setMaximumSize(QtCore.QSize(25, 25))
        self.pbReset.setText("")
        self.pbReset.setObjectName("pbReset")
        self.pbReset.setIcon(QtGui.QIcon(":/icons/resources/reset.png"))
        self.horizontalLayout.addWidget(self.pbReset)
        self.pbReset.clicked.connect(self.onResetValue)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        self._index = 0

    def setWidget(self, widget):
        self.horizontalLayout.insertWidget(self._index, widget)


class ExecInputWidget(InputWidgetSingle):
    """docstring for ExecInputWidget"""

    def __init__(self, parent=None, **kwds):
        super(ExecInputWidget, self).__init__(parent=parent, **kwds)
        self.pb = QPushButton('execute', self)
        self.setWidget(self.pb)
        self.pb.clicked.connect(self.dataSetCallback)
        self.pbReset.deleteLater()

    def setObjectName(self, name):
        super(ExecInputWidget, self).setObjectName(name)
        self.pb.setText(name.split(".")[-1])


class EnumInputWidget(InputWidgetSingle):
    """
    Enum input widget
    """

    def __init__(self, parent=None, **kwds):
        super(EnumInputWidget, self).__init__(parent=parent, **kwds)
        # self._userStruct = kwds['userStructClass']
        self.cb = QComboBox(self)
        self.setWidget(self.cb)
        for i in list(kwds['userStructClass']):
            self.cb.addItem(i.name, i.value)
        self.cb.currentIndexChanged[int].connect(self.dataSetCallback)

    def setWidgetValue(self, val):
        self.cb.setCurrentIndex(val)


class FloatInputWidget(InputWidgetSingle):
    """
    Floating point data input widget
    """

    def __init__(self, parent=None, **kwds):
        super(FloatInputWidget, self).__init__(parent=parent, **kwds)
        self.sb = QDoubleSpinBox(self)
        _configDoubleSpinBox(self.sb)
        self.setWidget(self.sb)
        # when spin box updated call setter function
        self.sb.valueChanged.connect(lambda val: self.dataSetCallback(val))

    def setWidgetValue(self, val):
        self.sb.setValue(float(val))


class IntInputWidget(InputWidgetSingle):
    """
    Decimal number input widget
    """

    def __init__(self, parent=None, **kwds):
        super(IntInputWidget, self).__init__(parent=parent, **kwds)
        self.sb = QSpinBox(self)
        _configIntSpinBox(self.sb)
        self.setWidget(self.sb)
        self.sb.valueChanged.connect(lambda val: self.dataSetCallback(val))

    def setWidgetValue(self, val):
        self.sb.setValue(int(val))


class NoneInputWidget(InputWidgetSingle):
    """
    String data input widget
    """

    def __init__(self, parent=None, **kwds):
        super(NoneInputWidget, self).__init__(parent=parent, **kwds)
        self.le = QLineEdit(self)
        self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        self.setWidget(self.le)
        self.le.textChanged.connect(lambda val: self.dataSetCallback(val))
        self.le.setEnabled(False)

    def setWidgetValue(self, val):
        self.le.setText(str(val))


class StringInputWidget(InputWidgetSingle):
    """
    String data input widget
    """

    def __init__(self, parent=None, **kwds):
        super(StringInputWidget, self).__init__(parent=parent, **kwds)
        self.le = QLineEdit(self)
        self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        self.setWidget(self.le)
        self.le.textChanged.connect(lambda val: self.dataSetCallback(val))

    def setWidgetValue(self, val):
        self.le.setText(str(val))


class BoolInputWidget(InputWidgetSingle):
    """Boolean data input widget"""

    def __init__(self, parent=None, **kwds):
        super(BoolInputWidget, self).__init__(parent=parent, **kwds)
        self.cb = QCheckBox(self)
        self.setWidget(self.cb)
        self.cb.stateChanged.connect(lambda val: self.dataSetCallback(bool(val)))

    def setWidgetValue(self, val):
        if bool(val):
            self.cb.setCheckState(QtCore.Qt.Checked)
        else:
            self.cb.setCheckState(QtCore.Qt.Unchecked)


class FloatVector3InputWidget(InputWidgetRaw, FloatVector3InputWidget_ui.Ui_Form):
    """Vector3 data input widget"""

    def __init__(self, **kwds):
        super(FloatVector3InputWidget, self).__init__(**kwds)
        self.setupUi(self)
        self._configSpinBoxes()
        self.dsbX.valueChanged.connect(self._onDataChangedX)
        self.dsbY.valueChanged.connect(self._onDataChangedY)
        self.dsbZ.valueChanged.connect(self._onDataChangedZ)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        return pyrr.Vector3([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value()])

    def _configSpinBoxes(self):
        self.dsbX.setDecimals(FLOAT_DECIMALS)
        self.dsbY.setDecimals(FLOAT_DECIMALS)
        self.dsbZ.setDecimals(FLOAT_DECIMALS)
        self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)

    def _onDataChangedX(self, val):
        v = self.asDataTypeClass()
        v.x = val
        self.dataSetCallback(v)

    def _onDataChangedY(self, val):
        v = self.asDataTypeClass()
        v.y = val
        self.dataSetCallback(v)

    def _onDataChangedZ(self, val):
        v = self.asDataTypeClass()
        v.z = val
        self.dataSetCallback(v)

    def setWidgetValue(self, val):
        self.dsbX.setValue(val.x)
        self.dsbY.setValue(val.y)
        self.dsbZ.setValue(val.z)


class FloatVector4InputWidget(InputWidgetRaw, FloatVector4InputWidget_ui.Ui_Form):
    """Vector4 data input widget"""

    def __init__(self, **kwds):
        super(FloatVector4InputWidget, self).__init__(**kwds)
        self.setupUi(self)
        self._configSpinBoxes()
        self.dsbX.valueChanged.connect(self._onDataChangedX)
        self.dsbY.valueChanged.connect(self._onDataChangedY)
        self.dsbZ.valueChanged.connect(self._onDataChangedZ)
        self.dsbW.valueChanged.connect(self._onDataChangedW)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        return pyrr.Vector4([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value(), self.dsbW.value()])

    def _configSpinBoxes(self):
        self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbW.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbW.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbX.setDecimals(FLOAT_DECIMALS)
        self.dsbY.setDecimals(FLOAT_DECIMALS)
        self.dsbZ.setDecimals(FLOAT_DECIMALS)
        self.dsbW.setDecimals(FLOAT_DECIMALS)

    def _onDataChangedX(self, val):
        v = self.asDataTypeClass()
        v.x = val
        self.dataSetCallback(v)

    def _onDataChangedY(self, val):
        v = self.asDataTypeClass()
        v.y = val
        self.dataSetCallback(v)

    def _onDataChangedZ(self, val):
        v = self.asDataTypeClass()
        v.z = val
        self.dataSetCallback(v)

    def _onDataChangedW(self, val):
        v = self.asDataTypeClass()
        v.w = val
        self.dataSetCallback(v)

    def setWidgetValue(self, val):
        self.dsbX.setValue(val.x)
        self.dsbY.setValue(val.y)
        self.dsbZ.setValue(val.z)
        self.dsbW.setValue(val.w)


class QuatInputWidget(FloatVector4InputWidget):
    """Quaternion data input widget"""

    def __init__(self, **kwds):
        super(QuatInputWidget, self).__init__(**kwds)

    def asDataTypeClass(self):
        return pyrr.Quaternion([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value(), self.dsbW.value()])


class Matrix33InputWidget(InputWidgetRaw, Matrix33InputWidget_ui.Ui_Form):
    """Matrix33 data input widget"""

    def __init__(self, parent=None, **kwds):
        super(Matrix33InputWidget, self).__init__(parent=parent, **kwds)
        self.setupUi(self)
        self._configSpinBoxes()
        self.dsbm11.valueChanged.connect(self.m11Changed)
        self.dsbm12.valueChanged.connect(self.m12Changed)
        self.dsbm13.valueChanged.connect(self.m13Changed)
        self.dsbm21.valueChanged.connect(self.m21Changed)
        self.dsbm22.valueChanged.connect(self.m22Changed)
        self.dsbm23.valueChanged.connect(self.m23Changed)
        self.dsbm31.valueChanged.connect(self.m31Changed)
        self.dsbm32.valueChanged.connect(self.m32Changed)
        self.dsbm33.valueChanged.connect(self.m33Changed)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        return pyrr.Matrix33([
            [self.dsbm11.value(), self.dsbm12.value(), self.dsbm13.value()],
            [self.dsbm21.value(), self.dsbm22.value(), self.dsbm23.value()],
            [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value()]
        ])

    def _configSpinBoxes(self):
        ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm21, self.dsbm22, self.dsbm23, self.dsbm31, self.dsbm32, self.dsbm33]
        for sb in ls:
            sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
            sb.setSingleStep(FLOAT_SINGLE_STEP)
            sb.setDecimals(FLOAT_DECIMALS)

    def m11Changed(self, val):
        m = self.asDataTypeClass()
        m.m11 = val
        self.dataSetCallback(m)

    def m12Changed(self, val):
        m = self.asDataTypeClass()
        m.m12 = val
        self.dataSetCallback(m)

    def m13Changed(self, val):
        m = self.asDataTypeClass()
        m.m13 = val
        self.dataSetCallback(m)

    def m21Changed(self, val):
        m = self.asDataTypeClass()
        m.m21 = val
        self.dataSetCallback(m)

    def m22Changed(self, val):
        m = self.asDataTypeClass()
        m.m22 = val
        self.dataSetCallback(m)

    def m23Changed(self, val):
        m = self.asDataTypeClass()
        m.m23 = val
        self.dataSetCallback(m)

    def m31Changed(self, val):
        m = self.asDataTypeClass()
        m.m31 = val
        self.dataSetCallback(m)

    def m32Changed(self, val):
        m = self.asDataTypeClass()
        m.m32 = val
        self.dataSetCallback(m)

    def m33Changed(self, val):
        m = self.asDataTypeClass()
        m.m33 = val
        self.dataSetCallback(m)

    def setWidgetValue(self, val):
        self.dsbm11.setValue(val.m11)
        self.dsbm12.setValue(val.m12)
        self.dsbm13.setValue(val.m13)
        self.dsbm21.setValue(val.m21)
        self.dsbm22.setValue(val.m22)
        self.dsbm23.setValue(val.m23)
        self.dsbm31.setValue(val.m31)
        self.dsbm32.setValue(val.m32)
        self.dsbm33.setValue(val.m33)


class Matrix44InputWidget(InputWidgetRaw, Matrix44InputWidget_ui.Ui_Form):
    """Matrix44 data input widget"""

    def __init__(self, parent=None, **kwds):
        super(Matrix44InputWidget, self).__init__(parent=parent, **kwds)
        self.setupUi(self)
        self._configSpinBoxes()
        self.dsbm11.valueChanged.connect(self.m11Changed)
        self.dsbm12.valueChanged.connect(self.m12Changed)
        self.dsbm13.valueChanged.connect(self.m13Changed)
        self.dsbm14.valueChanged.connect(self.m14Changed)
        self.dsbm21.valueChanged.connect(self.m21Changed)
        self.dsbm22.valueChanged.connect(self.m22Changed)
        self.dsbm23.valueChanged.connect(self.m23Changed)
        self.dsbm24.valueChanged.connect(self.m24Changed)
        self.dsbm31.valueChanged.connect(self.m31Changed)
        self.dsbm32.valueChanged.connect(self.m32Changed)
        self.dsbm33.valueChanged.connect(self.m33Changed)
        self.dsbm34.valueChanged.connect(self.m34Changed)
        self.dsbm41.valueChanged.connect(self.m41Changed)
        self.dsbm42.valueChanged.connect(self.m42Changed)
        self.dsbm43.valueChanged.connect(self.m43Changed)
        self.dsbm44.valueChanged.connect(self.m44Changed)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        return pyrr.Matrix44([
            [self.dsbm11.value(), self.dsbm12.value(), self.dsbm13.value(), self.dsbm14.value()],
            [self.dsbm21.value(), self.dsbm22.value(), self.dsbm23.value(), self.dsbm24.value()],
            [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value(), self.dsbm34.value()],
            [self.dsbm41.value(), self.dsbm42.value(), self.dsbm43.value(), self.dsbm44.value()]
        ])

    def _configSpinBoxes(self):
        ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm14, self.dsbm21, self.dsbm22, self.dsbm23, self.dsbm24, self.dsbm31, self.dsbm32, self.dsbm33, self.dsbm34, self.dsbm41, self.dsbm42, self.dsbm43, self.dsbm44]
        for sb in ls:
            sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
            sb.setSingleStep(FLOAT_SINGLE_STEP)
            sb.setDecimals(FLOAT_DECIMALS)

    def m11Changed(self, val):
        m = self.asDataTypeClass()
        m.m11 = val
        self.dataSetCallback(m)

    def m12Changed(self, val):
        m = self.asDataTypeClass()
        m.m12 = val
        self.dataSetCallback(m)

    def m13Changed(self, val):
        m = self.asDataTypeClass()
        m.m13 = val
        self.dataSetCallback(m)

    def m14Changed(self, val):
        m = self.asDataTypeClass()
        m.m14 = val
        self.dataSetCallback(m)

    def m21Changed(self, val):
        m = self.asDataTypeClass()
        m.m21 = val
        self.dataSetCallback(m)

    def m22Changed(self, val):
        m = self.asDataTypeClass()
        m.m22 = val
        self.dataSetCallback(m)

    def m23Changed(self, val):
        m = self.asDataTypeClass()
        m.m23 = val
        self.dataSetCallback(m)

    def m24Changed(self, val):
        m = self.asDataTypeClass()
        m.m24 = val
        self.dataSetCallback(m)

    def m31Changed(self, val):
        m = self.asDataTypeClass()
        m.m31 = val
        self.dataSetCallback(m)

    def m32Changed(self, val):
        m = self.asDataTypeClass()
        m.m32 = val
        self.dataSetCallback(m)

    def m33Changed(self, val):
        m = self.asDataTypeClass()
        m.m33 = val
        self.dataSetCallback(m)

    def m34Changed(self, val):
        m = self.asDataTypeClass()
        m.m34 = val
        self.dataSetCallback(m)

    def m41Changed(self, val):
        m = self.asDataTypeClass()
        m.m41 = val
        self.dataSetCallback(m)

    def m42Changed(self, val):
        m = self.asDataTypeClass()
        m.m42 = val
        self.dataSetCallback(m)

    def m43Changed(self, val):
        m = self.asDataTypeClass()
        m.m43 = val
        self.dataSetCallback(m)

    def m44Changed(self, val):
        m = self.asDataTypeClass()
        m.m44 = val
        self.dataSetCallback(m)

    def setWidgetValue(self, val):
        self.dsbm11.setValue(val.m11)
        self.dsbm12.setValue(val.m12)
        self.dsbm13.setValue(val.m13)
        self.dsbm14.setValue(val.m14)
        self.dsbm21.setValue(val.m21)
        self.dsbm22.setValue(val.m22)
        self.dsbm23.setValue(val.m23)
        self.dsbm24.setValue(val.m24)
        self.dsbm31.setValue(val.m31)
        self.dsbm32.setValue(val.m32)
        self.dsbm33.setValue(val.m33)
        self.dsbm34.setValue(val.m34)
        self.dsbm41.setValue(val.m41)
        self.dsbm42.setValue(val.m42)
        self.dsbm43.setValue(val.m43)
        self.dsbm44.setValue(val.m44)


def getInputWidget(dataType, dataSetter, defaultValue, userStructClass):
    ''' factory method '''
    if dataType == DataTypes.Float:
        return FloatInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.Int:
        return IntInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.String:
        return StringInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.Bool:
        return BoolInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.FloatVector3:
        return FloatVector3InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.FloatVector4:
        return FloatVector4InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.Quaternion:
        return QuatInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.Matrix33:
        return Matrix33InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.Matrix44:
        return Matrix44InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.Exec:
        return ExecInputWidget(dataSetCallback=dataSetter, defaultValue=None)
    if dataType == DataTypes.Enum:
        return EnumInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue, userStructClass=userStructClass)
    return NoneInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
normal
{ "blob_id": "023dc23a5e649c2fbbb45ff577dffa3b5d2aac64", "index": 7904, "step-1": "<mask token>\n\n\nclass FloatVector3InputWidget(InputWidgetRaw, FloatVector3InputWidget_ui.\n Ui_Form):\n <mask token>\n\n def __init__(self, **kwds):\n super(FloatVector3InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector3([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n\n\nclass FloatVector4InputWidget(InputWidgetRaw, FloatVector4InputWidget_ui.\n Ui_Form):\n \"\"\"Vector4 data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(FloatVector4InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.dsbW.valueChanged.connect(self._onDataChangedW)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector4([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbW.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbW.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbW.setDecimals(FLOAT_DECIMALS)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def _onDataChangedW(self, val):\n v = self.asDataTypeClass()\n v.w = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n self.dsbW.setValue(val.w)\n\n\nclass QuatInputWidget(FloatVector4InputWidget):\n \"\"\"Quaternion data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(QuatInputWidget, self).__init__(**kwds)\n\n def 
asDataTypeClass(self):\n return pyrr.Quaternion([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n\nclass Matrix33InputWidget(InputWidgetRaw, Matrix33InputWidget_ui.Ui_Form):\n \"\"\"Matrix33 data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(Matrix33InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix33([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value()], [self.dsbm21.value(), self.dsbm22.value(),\n self.dsbm23.value()], [self.dsbm31.value(), self.dsbm32.value(),\n self.dsbm33.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm21, self.\n dsbm22, self.dsbm23, self.dsbm31, self.dsbm32, self.dsbm33]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n\n\nclass Matrix44InputWidget(InputWidgetRaw, Matrix44InputWidget_ui.Ui_Form):\n \"\"\"Matrix44 data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(Matrix44InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm14.valueChanged.connect(self.m14Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm24.valueChanged.connect(self.m24Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n 
self.dsbm33.valueChanged.connect(self.m33Changed)\n self.dsbm34.valueChanged.connect(self.m34Changed)\n self.dsbm41.valueChanged.connect(self.m41Changed)\n self.dsbm42.valueChanged.connect(self.m42Changed)\n self.dsbm43.valueChanged.connect(self.m43Changed)\n self.dsbm44.valueChanged.connect(self.m44Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix44([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value(), self.dsbm14.value()], [self.dsbm21.value(),\n self.dsbm22.value(), self.dsbm23.value(), self.dsbm24.value()],\n [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value(),\n self.dsbm34.value()], [self.dsbm41.value(), self.dsbm42.value(),\n self.dsbm43.value(), self.dsbm44.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm14, self.\n dsbm21, self.dsbm22, self.dsbm23, self.dsbm24, self.dsbm31,\n self.dsbm32, self.dsbm33, self.dsbm34, self.dsbm41, self.dsbm42,\n self.dsbm43, self.dsbm44]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m14Changed(self, val):\n m = self.asDataTypeClass()\n m.m14 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m24Changed(self, val):\n m = self.asDataTypeClass()\n m.m24 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def m34Changed(self, val):\n m = self.asDataTypeClass()\n m.m34 = val\n self.dataSetCallback(m)\n\n def m41Changed(self, val):\n m = self.asDataTypeClass()\n m.m41 = val\n self.dataSetCallback(m)\n\n def m42Changed(self, val):\n m = self.asDataTypeClass()\n m.m42 = val\n self.dataSetCallback(m)\n\n def m43Changed(self, val):\n m = self.asDataTypeClass()\n m.m43 = val\n self.dataSetCallback(m)\n\n def m44Changed(self, val):\n m = self.asDataTypeClass()\n m.m44 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm14.setValue(val.m14)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm24.setValue(val.m24)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n self.dsbm34.setValue(val.m34)\n self.dsbm41.setValue(val.m41)\n self.dsbm42.setValue(val.m42)\n self.dsbm43.setValue(val.m43)\n self.dsbm44.setValue(val.m44)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass FloatVector3InputWidget(InputWidgetRaw, FloatVector3InputWidget_ui.\n Ui_Form):\n \"\"\"Vector3 data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(FloatVector3InputWidget, 
self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector3([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n\n\nclass FloatVector4InputWidget(InputWidgetRaw, FloatVector4InputWidget_ui.\n Ui_Form):\n \"\"\"Vector4 data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(FloatVector4InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.dsbW.valueChanged.connect(self._onDataChangedW)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector4([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbW.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbW.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbW.setDecimals(FLOAT_DECIMALS)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def _onDataChangedW(self, val):\n v = self.asDataTypeClass()\n v.w = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n self.dsbW.setValue(val.w)\n\n\nclass QuatInputWidget(FloatVector4InputWidget):\n \"\"\"Quaternion data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(QuatInputWidget, self).__init__(**kwds)\n\n def asDataTypeClass(self):\n return pyrr.Quaternion([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n\nclass Matrix33InputWidget(InputWidgetRaw, Matrix33InputWidget_ui.Ui_Form):\n \"\"\"Matrix33 data input widget\"\"\"\n\n def 
__init__(self, parent=None, **kwds):\n super(Matrix33InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix33([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value()], [self.dsbm21.value(), self.dsbm22.value(),\n self.dsbm23.value()], [self.dsbm31.value(), self.dsbm32.value(),\n self.dsbm33.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm21, self.\n dsbm22, self.dsbm23, self.dsbm31, self.dsbm32, self.dsbm33]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n\n\nclass Matrix44InputWidget(InputWidgetRaw, Matrix44InputWidget_ui.Ui_Form):\n \"\"\"Matrix44 data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(Matrix44InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm14.valueChanged.connect(self.m14Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm24.valueChanged.connect(self.m24Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.dsbm34.valueChanged.connect(self.m34Changed)\n self.dsbm41.valueChanged.connect(self.m41Changed)\n self.dsbm42.valueChanged.connect(self.m42Changed)\n self.dsbm43.valueChanged.connect(self.m43Changed)\n 
self.dsbm44.valueChanged.connect(self.m44Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix44([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value(), self.dsbm14.value()], [self.dsbm21.value(),\n self.dsbm22.value(), self.dsbm23.value(), self.dsbm24.value()],\n [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value(),\n self.dsbm34.value()], [self.dsbm41.value(), self.dsbm42.value(),\n self.dsbm43.value(), self.dsbm44.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm14, self.\n dsbm21, self.dsbm22, self.dsbm23, self.dsbm24, self.dsbm31,\n self.dsbm32, self.dsbm33, self.dsbm34, self.dsbm41, self.dsbm42,\n self.dsbm43, self.dsbm44]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m14Changed(self, val):\n m = self.asDataTypeClass()\n m.m14 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m24Changed(self, val):\n m = self.asDataTypeClass()\n m.m24 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def m34Changed(self, val):\n m = self.asDataTypeClass()\n m.m34 = val\n self.dataSetCallback(m)\n\n def m41Changed(self, val):\n m = self.asDataTypeClass()\n m.m41 = val\n self.dataSetCallback(m)\n\n def m42Changed(self, val):\n m = self.asDataTypeClass()\n m.m42 = val\n self.dataSetCallback(m)\n\n def m43Changed(self, val):\n m = self.asDataTypeClass()\n m.m43 = val\n self.dataSetCallback(m)\n\n def m44Changed(self, val):\n m = self.asDataTypeClass()\n m.m44 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm14.setValue(val.m14)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm24.setValue(val.m24)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n self.dsbm34.setValue(val.m34)\n self.dsbm41.setValue(val.m41)\n self.dsbm42.setValue(val.m42)\n self.dsbm43.setValue(val.m43)\n self.dsbm44.setValue(val.m44)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BoolInputWidget(InputWidgetSingle):\n <mask token>\n\n def __init__(self, parent=None, **kwds):\n super(BoolInputWidget, self).__init__(parent=parent, **kwds)\n self.cb = QCheckBox(self)\n self.setWidget(self.cb)\n self.cb.stateChanged.connect(lambda val: self.dataSetCallback(bool(\n val)))\n <mask token>\n\n\nclass FloatVector3InputWidget(InputWidgetRaw, FloatVector3InputWidget_ui.\n Ui_Form):\n \"\"\"Vector3 data input widget\"\"\"\n\n def 
__init__(self, **kwds):\n super(FloatVector3InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector3([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n\n\nclass FloatVector4InputWidget(InputWidgetRaw, FloatVector4InputWidget_ui.\n Ui_Form):\n \"\"\"Vector4 data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(FloatVector4InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.dsbW.valueChanged.connect(self._onDataChangedW)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector4([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbW.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbW.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbW.setDecimals(FLOAT_DECIMALS)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def _onDataChangedW(self, val):\n v = self.asDataTypeClass()\n v.w = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n self.dsbW.setValue(val.w)\n\n\nclass QuatInputWidget(FloatVector4InputWidget):\n \"\"\"Quaternion data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(QuatInputWidget, self).__init__(**kwds)\n\n def asDataTypeClass(self):\n return pyrr.Quaternion([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n\nclass Matrix33InputWidget(InputWidgetRaw, Matrix33InputWidget_ui.Ui_Form):\n 
\"\"\"Matrix33 data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(Matrix33InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix33([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value()], [self.dsbm21.value(), self.dsbm22.value(),\n self.dsbm23.value()], [self.dsbm31.value(), self.dsbm32.value(),\n self.dsbm33.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm21, self.\n dsbm22, self.dsbm23, self.dsbm31, self.dsbm32, self.dsbm33]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n\n\nclass Matrix44InputWidget(InputWidgetRaw, Matrix44InputWidget_ui.Ui_Form):\n \"\"\"Matrix44 data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(Matrix44InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm14.valueChanged.connect(self.m14Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm24.valueChanged.connect(self.m24Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.dsbm34.valueChanged.connect(self.m34Changed)\n self.dsbm41.valueChanged.connect(self.m41Changed)\n self.dsbm42.valueChanged.connect(self.m42Changed)\n self.dsbm43.valueChanged.connect(self.m43Changed)\n 
self.dsbm44.valueChanged.connect(self.m44Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix44([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value(), self.dsbm14.value()], [self.dsbm21.value(),\n self.dsbm22.value(), self.dsbm23.value(), self.dsbm24.value()],\n [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value(),\n self.dsbm34.value()], [self.dsbm41.value(), self.dsbm42.value(),\n self.dsbm43.value(), self.dsbm44.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm14, self.\n dsbm21, self.dsbm22, self.dsbm23, self.dsbm24, self.dsbm31,\n self.dsbm32, self.dsbm33, self.dsbm34, self.dsbm41, self.dsbm42,\n self.dsbm43, self.dsbm44]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m14Changed(self, val):\n m = self.asDataTypeClass()\n m.m14 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m24Changed(self, val):\n m = self.asDataTypeClass()\n m.m24 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def m34Changed(self, val):\n m = self.asDataTypeClass()\n m.m34 = val\n self.dataSetCallback(m)\n\n def m41Changed(self, val):\n m = self.asDataTypeClass()\n m.m41 = val\n self.dataSetCallback(m)\n\n def m42Changed(self, val):\n m = self.asDataTypeClass()\n m.m42 = val\n self.dataSetCallback(m)\n\n def m43Changed(self, val):\n m = self.asDataTypeClass()\n m.m43 = val\n self.dataSetCallback(m)\n\n def m44Changed(self, val):\n m = self.asDataTypeClass()\n m.m44 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm14.setValue(val.m14)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm24.setValue(val.m24)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n self.dsbm34.setValue(val.m34)\n self.dsbm41.setValue(val.m41)\n self.dsbm42.setValue(val.m42)\n self.dsbm43.setValue(val.m43)\n self.dsbm44.setValue(val.m44)\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass EnumInputWidget(InputWidgetSingle):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass FloatInputWidget(InputWidgetSingle):\n \"\"\"\n Floating point data input widget\n \"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(FloatInputWidget, self).__init__(parent=parent, **kwds)\n self.sb = QDoubleSpinBox(self)\n _configDoubleSpinBox(self.sb)\n self.setWidget(self.sb)\n self.sb.valueChanged.connect(lambda val: 
self.dataSetCallback(val))\n\n def setWidgetValue(self, val):\n self.sb.setValue(float(val))\n\n\nclass IntInputWidget(InputWidgetSingle):\n \"\"\"\n Decimal number input widget\n \"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(IntInputWidget, self).__init__(parent=parent, **kwds)\n self.sb = QSpinBox(self)\n _configIntSpinBox(self.sb)\n self.setWidget(self.sb)\n self.sb.valueChanged.connect(lambda val: self.dataSetCallback(val))\n\n def setWidgetValue(self, val):\n self.sb.setValue(int(val))\n\n\nclass NoneInputWidget(InputWidgetSingle):\n \"\"\"\n String data input widget\n \"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(NoneInputWidget, self).__init__(parent=parent, **kwds)\n self.le = QLineEdit(self)\n self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n self.setWidget(self.le)\n self.le.textChanged.connect(lambda val: self.dataSetCallback(val))\n self.le.setEnabled(False)\n\n def setWidgetValue(self, val):\n self.le.setText(str(val))\n\n\nclass StringInputWidget(InputWidgetSingle):\n \"\"\"\n String data input widget\n \"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(StringInputWidget, self).__init__(parent=parent, **kwds)\n self.le = QLineEdit(self)\n self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n self.setWidget(self.le)\n self.le.textChanged.connect(lambda val: self.dataSetCallback(val))\n\n def setWidgetValue(self, val):\n self.le.setText(str(val))\n\n\nclass BoolInputWidget(InputWidgetSingle):\n \"\"\"Boolean data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(BoolInputWidget, self).__init__(parent=parent, **kwds)\n self.cb = QCheckBox(self)\n self.setWidget(self.cb)\n self.cb.stateChanged.connect(lambda val: self.dataSetCallback(bool(\n val)))\n\n def setWidgetValue(self, val):\n if bool(val):\n self.cb.setCheckState(QtCore.Qt.Checked)\n else:\n self.cb.setCheckState(QtCore.Qt.Unchecked)\n\n\nclass FloatVector3InputWidget(InputWidgetRaw, FloatVector3InputWidget_ui.\n Ui_Form):\n \"\"\"Vector3 data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(FloatVector3InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector3([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n\n\nclass FloatVector4InputWidget(InputWidgetRaw, FloatVector4InputWidget_ui.\n Ui_Form):\n \"\"\"Vector4 data input widget\"\"\"\n\n def __init__(self, **kwds):\n 
super(FloatVector4InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.dsbW.valueChanged.connect(self._onDataChangedW)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector4([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbW.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbW.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbW.setDecimals(FLOAT_DECIMALS)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def _onDataChangedW(self, val):\n v = self.asDataTypeClass()\n v.w = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n self.dsbW.setValue(val.w)\n\n\nclass QuatInputWidget(FloatVector4InputWidget):\n \"\"\"Quaternion data input widget\"\"\"\n\n def __init__(self, **kwds):\n super(QuatInputWidget, self).__init__(**kwds)\n\n def asDataTypeClass(self):\n return pyrr.Quaternion([self.dsbX.value(), self.dsbY.value(), self.\n dsbZ.value(), self.dsbW.value()])\n\n\nclass Matrix33InputWidget(InputWidgetRaw, Matrix33InputWidget_ui.Ui_Form):\n \"\"\"Matrix33 data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(Matrix33InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix33([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value()], [self.dsbm21.value(), self.dsbm22.value(),\n self.dsbm23.value()], [self.dsbm31.value(), self.dsbm32.value(),\n self.dsbm33.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm21, self.\n dsbm22, self.dsbm23, self.dsbm31, self.dsbm32, self.dsbm33]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m 
= self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n\n\nclass Matrix44InputWidget(InputWidgetRaw, Matrix44InputWidget_ui.Ui_Form):\n \"\"\"Matrix44 data input widget\"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(Matrix44InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm14.valueChanged.connect(self.m14Changed)\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm24.valueChanged.connect(self.m24Changed)\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.dsbm34.valueChanged.connect(self.m34Changed)\n self.dsbm41.valueChanged.connect(self.m41Changed)\n self.dsbm42.valueChanged.connect(self.m42Changed)\n self.dsbm43.valueChanged.connect(self.m43Changed)\n self.dsbm44.valueChanged.connect(self.m44Changed)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix44([[self.dsbm11.value(), self.dsbm12.value(),\n self.dsbm13.value(), self.dsbm14.value()], [self.dsbm21.value(),\n self.dsbm22.value(), self.dsbm23.value(), self.dsbm24.value()],\n [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value(),\n self.dsbm34.value()], [self.dsbm41.value(), self.dsbm42.value(),\n self.dsbm43.value(), self.dsbm44.value()]])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm14, self.\n dsbm21, self.dsbm22, self.dsbm23, self.dsbm24, self.dsbm31,\n self.dsbm32, self.dsbm33, self.dsbm34, self.dsbm41, self.dsbm42,\n self.dsbm43, self.dsbm44]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m14Changed(self, val):\n m = self.asDataTypeClass()\n m.m14 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n 
def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m24Changed(self, val):\n m = self.asDataTypeClass()\n m.m24 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def m34Changed(self, val):\n m = self.asDataTypeClass()\n m.m34 = val\n self.dataSetCallback(m)\n\n def m41Changed(self, val):\n m = self.asDataTypeClass()\n m.m41 = val\n self.dataSetCallback(m)\n\n def m42Changed(self, val):\n m = self.asDataTypeClass()\n m.m42 = val\n self.dataSetCallback(m)\n\n def m43Changed(self, val):\n m = self.asDataTypeClass()\n m.m43 = val\n self.dataSetCallback(m)\n\n def m44Changed(self, val):\n m = self.asDataTypeClass()\n m.m44 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n self.dsbm14.setValue(val.m14)\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm24.setValue(val.m24)\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n self.dsbm34.setValue(val.m34)\n self.dsbm41.setValue(val.m41)\n self.dsbm42.setValue(val.m42)\n self.dsbm43.setValue(val.m43)\n self.dsbm44.setValue(val.m44)\n\n\n<mask token>\n", "step-5": "import weakref\nfrom Qt import QtCore\nfrom Qt import QtGui\nfrom Qt.QtWidgets import QDoubleSpinBox\nfrom Qt.QtWidgets import QSpinBox\nfrom Qt.QtWidgets import QWidget\nfrom Qt.QtWidgets import QSpacerItem\nfrom Qt.QtWidgets import QPushButton\nfrom Qt.QtWidgets import QComboBox\nfrom Qt.QtWidgets import QLineEdit\nfrom Qt.QtWidgets import QCheckBox\nfrom Qt.QtWidgets import QGraphicsProxyWidget\nfrom Qt.QtWidgets import QGridLayout\nfrom Qt.QtWidgets import QHBoxLayout\nfrom Qt.QtWidgets import QSizePolicy\nfrom AGraphCommon import *\nfrom AbstractGraph import PinBase\nfrom ..Ui import FloatVector3InputWidget_ui\nfrom ..Ui import FloatVector4InputWidget_ui\nfrom ..Ui import Matrix33InputWidget_ui\nfrom ..Ui import Matrix44InputWidget_ui\nimport pyrr\n\n\ndef _configDoubleSpinBox(sb):\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n\ndef _configIntSpinBox(sb):\n sb.setRange(INT_RANGE_MIN, INT_RANGE_MAX)\n\n\nclass InputWidgetRaw(QWidget):\n \"\"\"\n This type of widget can be used as a base class for complex ui generated by designer\n \"\"\"\n def __init__(self, parent=None, dataSetCallback=None, defaultValue=None, userStructClass=None, **kwds):\n super(InputWidgetRaw, self).__init__(parent=parent, **kwds)\n self._defaultValue = defaultValue\n # fuction with signature void(object)\n # this will set data to pin\n self.dataSetCallback = dataSetCallback\n\n def onResetValue(self):\n self.setWidgetValue(self._defaultValue)\n\n def setWidgetValue(self, value):\n '''to widget'''\n pass\n\n def widgetValueUpdated(self, value):\n '''from widget'''\n pass\n\n\nclass InputWidgetSingle(InputWidgetRaw):\n \"\"\"\n This type of widget is used for a simple widgets like buttons, checkboxes etc.\n It consists of horizontal layout widget itself and reset button.\n \"\"\"\n\n def __init__(self, parent=None, dataSetCallback=None, defaultValue=None, userStructClass=None, **kwds):\n 
super(InputWidgetSingle, self).__init__(parent=parent, dataSetCallback=dataSetCallback, defaultValue=defaultValue, userStructClass=userStructClass, **kwds)\n # from widget\n self.bWidgetSet = False\n self.gridLayout = QGridLayout(self)\n self.gridLayout.setSpacing(1)\n self.gridLayout.setContentsMargins(0, 0, 0, 0)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.horizontalLayout = QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n\n spacerItem = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n self.pbReset = QPushButton(self)\n self.pbReset.setMaximumSize(QtCore.QSize(25, 25))\n self.pbReset.setText(\"\")\n self.pbReset.setObjectName(\"pbReset\")\n self.pbReset.setIcon(QtGui.QIcon(\":/icons/resources/reset.png\"))\n self.horizontalLayout.addWidget(self.pbReset)\n self.pbReset.clicked.connect(self.onResetValue)\n\n self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)\n self._index = 0\n\n def setWidget(self, widget):\n self.horizontalLayout.insertWidget(self._index, widget)\n\n\nclass ExecInputWidget(InputWidgetSingle):\n \"\"\"docstring for ExecInputWidget\"\"\"\n def __init__(self, parent=None, **kwds):\n super(ExecInputWidget, self).__init__(parent=parent, **kwds)\n self.pb = QPushButton('execute', self)\n self.setWidget(self.pb)\n self.pb.clicked.connect(self.dataSetCallback)\n self.pbReset.deleteLater()\n def setObjectName(self,name):\n super(ExecInputWidget, self).setObjectName(name)\n self.pb.setText(name.split(\".\")[-1])\n\nclass EnumInputWidget(InputWidgetSingle):\n \"\"\"\n Enum input widget\n \"\"\"\n def __init__(self, parent=None, **kwds):\n super(EnumInputWidget, self).__init__(parent=parent, **kwds)\n # self._userStruct = kwds['userStructClass']\n self.cb = QComboBox(self)\n self.setWidget(self.cb)\n for i in list(kwds['userStructClass']):\n self.cb.addItem(i.name, i.value)\n self.cb.currentIndexChanged[int].connect(self.dataSetCallback)\n\n def setWidgetValue(self, val):\n self.cb.setCurrentIndex(val)\n\n\nclass FloatInputWidget(InputWidgetSingle):\n \"\"\"\n Floating point data input widget\n \"\"\"\n\n def __init__(self, parent=None, **kwds):\n super(FloatInputWidget, self).__init__(parent=parent, **kwds)\n self.sb = QDoubleSpinBox(self)\n _configDoubleSpinBox(self.sb)\n self.setWidget(self.sb)\n # when spin box updated call setter function\n self.sb.valueChanged.connect(lambda val: self.dataSetCallback(val))\n\n def setWidgetValue(self, val):\n self.sb.setValue(float(val))\n\n\nclass IntInputWidget(InputWidgetSingle):\n \"\"\"\n Decimal number input widget\n \"\"\"\n def __init__(self, parent=None, **kwds):\n super(IntInputWidget, self).__init__(parent=parent, **kwds)\n self.sb = QSpinBox(self)\n _configIntSpinBox(self.sb)\n self.setWidget(self.sb)\n self.sb.valueChanged.connect(lambda val: self.dataSetCallback(val))\n\n def setWidgetValue(self, val):\n self.sb.setValue(int(val))\n\n\nclass NoneInputWidget(InputWidgetSingle):\n \"\"\"\n String data input widget\n \"\"\"\n def __init__(self, parent=None, **kwds):\n super(NoneInputWidget, self).__init__(parent=parent, **kwds)\n self.le = QLineEdit(self)\n self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n self.setWidget(self.le)\n self.le.textChanged.connect(lambda val: self.dataSetCallback(val))\n self.le.setEnabled(False)\n\n def setWidgetValue(self, val):\n self.le.setText(str(val))\n\nclass StringInputWidget(InputWidgetSingle):\n \"\"\"\n String data input widget\n \"\"\"\n def __init__(self, parent=None, 
**kwds):\n super(StringInputWidget, self).__init__(parent=parent, **kwds)\n self.le = QLineEdit(self)\n self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n self.setWidget(self.le)\n self.le.textChanged.connect(lambda val: self.dataSetCallback(val))\n\n def setWidgetValue(self, val):\n self.le.setText(str(val))\n\n\nclass BoolInputWidget(InputWidgetSingle):\n \"\"\"Boolean data input widget\"\"\"\n def __init__(self, parent=None, **kwds):\n super(BoolInputWidget, self).__init__(parent=parent, **kwds)\n self.cb = QCheckBox(self)\n self.setWidget(self.cb)\n self.cb.stateChanged.connect(lambda val: self.dataSetCallback(bool(val)))\n\n def setWidgetValue(self, val):\n if bool(val):\n self.cb.setCheckState(QtCore.Qt.Checked)\n else:\n self.cb.setCheckState(QtCore.Qt.Unchecked)\n\n\nclass FloatVector3InputWidget(InputWidgetRaw, FloatVector3InputWidget_ui.Ui_Form):\n \"\"\"Vector3 data input widget\"\"\"\n def __init__(self, **kwds):\n super(FloatVector3InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector3([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n\n def _onDataChangedX(self, val):\n v = self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n\n\nclass FloatVector4InputWidget(InputWidgetRaw, FloatVector4InputWidget_ui.Ui_Form):\n \"\"\"Vector4 data input widget\"\"\"\n def __init__(self, **kwds):\n super(FloatVector4InputWidget, self).__init__(**kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n self.dsbX.valueChanged.connect(self._onDataChangedX)\n self.dsbY.valueChanged.connect(self._onDataChangedY)\n self.dsbZ.valueChanged.connect(self._onDataChangedZ)\n self.dsbW.valueChanged.connect(self._onDataChangedW)\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Vector4([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value(), self.dsbW.value()])\n\n def _configSpinBoxes(self):\n self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbW.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbW.setSingleStep(FLOAT_SINGLE_STEP)\n self.dsbX.setDecimals(FLOAT_DECIMALS)\n self.dsbY.setDecimals(FLOAT_DECIMALS)\n self.dsbZ.setDecimals(FLOAT_DECIMALS)\n self.dsbW.setDecimals(FLOAT_DECIMALS)\n\n def _onDataChangedX(self, val):\n v = 
self.asDataTypeClass()\n v.x = val\n self.dataSetCallback(v)\n\n def _onDataChangedY(self, val):\n v = self.asDataTypeClass()\n v.y = val\n self.dataSetCallback(v)\n\n def _onDataChangedZ(self, val):\n v = self.asDataTypeClass()\n v.z = val\n self.dataSetCallback(v)\n\n def _onDataChangedW(self, val):\n v = self.asDataTypeClass()\n v.w = val\n self.dataSetCallback(v)\n\n def setWidgetValue(self, val):\n self.dsbX.setValue(val.x)\n self.dsbY.setValue(val.y)\n self.dsbZ.setValue(val.z)\n self.dsbW.setValue(val.w)\n\n\nclass QuatInputWidget(FloatVector4InputWidget):\n \"\"\"Quaternion data input widget\"\"\"\n def __init__(self, **kwds):\n super(QuatInputWidget, self).__init__(**kwds)\n\n def asDataTypeClass(self):\n return pyrr.Quaternion([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value(), self.dsbW.value()])\n\n\nclass Matrix33InputWidget(InputWidgetRaw, Matrix33InputWidget_ui.Ui_Form):\n \"\"\"Matrix33 data input widget\"\"\"\n def __init__(self, parent=None, **kwds):\n super(Matrix33InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix33([\n [self.dsbm11.value(), self.dsbm12.value(), self.dsbm13.value()],\n [self.dsbm21.value(), self.dsbm22.value(), self.dsbm23.value()],\n [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value()]\n ])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13,\n self.dsbm21, self.dsbm22, self.dsbm23,\n self.dsbm31, self.dsbm32, self.dsbm33]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n self.dsbm13.setValue(val.m13)\n\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n\n\nclass Matrix44InputWidget(InputWidgetRaw, Matrix44InputWidget_ui.Ui_Form):\n \"\"\"Matrix44 data input widget\"\"\"\n def __init__(self, 
parent=None, **kwds):\n super(Matrix44InputWidget, self).__init__(parent=parent, **kwds)\n self.setupUi(self)\n self._configSpinBoxes()\n\n self.dsbm11.valueChanged.connect(self.m11Changed)\n self.dsbm12.valueChanged.connect(self.m12Changed)\n self.dsbm13.valueChanged.connect(self.m13Changed)\n self.dsbm14.valueChanged.connect(self.m14Changed)\n\n self.dsbm21.valueChanged.connect(self.m21Changed)\n self.dsbm22.valueChanged.connect(self.m22Changed)\n self.dsbm23.valueChanged.connect(self.m23Changed)\n self.dsbm24.valueChanged.connect(self.m24Changed)\n\n self.dsbm31.valueChanged.connect(self.m31Changed)\n self.dsbm32.valueChanged.connect(self.m32Changed)\n self.dsbm33.valueChanged.connect(self.m33Changed)\n self.dsbm34.valueChanged.connect(self.m34Changed)\n\n self.dsbm41.valueChanged.connect(self.m41Changed)\n self.dsbm42.valueChanged.connect(self.m42Changed)\n self.dsbm43.valueChanged.connect(self.m43Changed)\n self.dsbm44.valueChanged.connect(self.m44Changed)\n\n self.pbReset.clicked.connect(self.onResetValue)\n\n def asDataTypeClass(self):\n return pyrr.Matrix44([\n [self.dsbm11.value(), self.dsbm12.value(), self.dsbm13.value(), self.dsbm14.value()],\n [self.dsbm21.value(), self.dsbm22.value(), self.dsbm23.value(), self.dsbm24.value()],\n [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value(), self.dsbm34.value()],\n [self.dsbm41.value(), self.dsbm42.value(), self.dsbm43.value(), self.dsbm44.value()]\n ])\n\n def _configSpinBoxes(self):\n ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm14,\n self.dsbm21, self.dsbm22, self.dsbm23, self.dsbm24,\n self.dsbm31, self.dsbm32, self.dsbm33, self.dsbm34,\n self.dsbm41, self.dsbm42, self.dsbm43, self.dsbm44]\n for sb in ls:\n sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)\n sb.setSingleStep(FLOAT_SINGLE_STEP)\n sb.setDecimals(FLOAT_DECIMALS)\n\n def m11Changed(self, val):\n m = self.asDataTypeClass()\n m.m11 = val\n self.dataSetCallback(m)\n\n def m12Changed(self, val):\n m = self.asDataTypeClass()\n m.m12 = val\n self.dataSetCallback(m)\n\n def m13Changed(self, val):\n m = self.asDataTypeClass()\n m.m13 = val\n self.dataSetCallback(m)\n\n def m14Changed(self, val):\n m = self.asDataTypeClass()\n m.m14 = val\n self.dataSetCallback(m)\n\n def m21Changed(self, val):\n m = self.asDataTypeClass()\n m.m21 = val\n self.dataSetCallback(m)\n\n def m22Changed(self, val):\n m = self.asDataTypeClass()\n m.m22 = val\n self.dataSetCallback(m)\n\n def m23Changed(self, val):\n m = self.asDataTypeClass()\n m.m23 = val\n self.dataSetCallback(m)\n\n def m24Changed(self, val):\n m = self.asDataTypeClass()\n m.m24 = val\n self.dataSetCallback(m)\n\n def m31Changed(self, val):\n m = self.asDataTypeClass()\n m.m31 = val\n self.dataSetCallback(m)\n\n def m32Changed(self, val):\n m = self.asDataTypeClass()\n m.m32 = val\n self.dataSetCallback(m)\n\n def m33Changed(self, val):\n m = self.asDataTypeClass()\n m.m33 = val\n self.dataSetCallback(m)\n\n def m34Changed(self, val):\n m = self.asDataTypeClass()\n m.m34 = val\n self.dataSetCallback(m)\n\n def m41Changed(self, val):\n m = self.asDataTypeClass()\n m.m41 = val\n self.dataSetCallback(m)\n\n def m42Changed(self, val):\n m = self.asDataTypeClass()\n m.m42 = val\n self.dataSetCallback(m)\n\n def m43Changed(self, val):\n m = self.asDataTypeClass()\n m.m43 = val\n self.dataSetCallback(m)\n\n def m44Changed(self, val):\n m = self.asDataTypeClass()\n m.m44 = val\n self.dataSetCallback(m)\n\n def setWidgetValue(self, val):\n self.dsbm11.setValue(val.m11)\n self.dsbm12.setValue(val.m12)\n 
self.dsbm13.setValue(val.m13)\n self.dsbm14.setValue(val.m14)\n\n self.dsbm21.setValue(val.m21)\n self.dsbm22.setValue(val.m22)\n self.dsbm23.setValue(val.m23)\n self.dsbm24.setValue(val.m24)\n\n self.dsbm31.setValue(val.m31)\n self.dsbm32.setValue(val.m32)\n self.dsbm33.setValue(val.m33)\n self.dsbm34.setValue(val.m34)\n\n self.dsbm41.setValue(val.m41)\n self.dsbm42.setValue(val.m42)\n self.dsbm43.setValue(val.m43)\n self.dsbm44.setValue(val.m44)\n\n\ndef getInputWidget(dataType, dataSetter, defaultValue, userStructClass):\n '''\n factory method\n '''\n if dataType == DataTypes.Float:\n return FloatInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.Int:\n return IntInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.String:\n return StringInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.Bool:\n return BoolInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.FloatVector3:\n return FloatVector3InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.FloatVector4:\n return FloatVector4InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.Quaternion:\n return QuatInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.Matrix33:\n return Matrix33InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.Matrix44:\n return Matrix44InputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n if dataType == DataTypes.Exec:\n return ExecInputWidget(dataSetCallback=dataSetter, defaultValue=None)\n if dataType == DataTypes.Enum:\n return EnumInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue, userStructClass=userStructClass)\n \n return NoneInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)\n", "step-ids": [ 59, 60, 62, 81, 103 ] }
[ 59, 60, 62, 81, 103 ]
import mysql.connector
# config = {
# "user":"root",
# "password":"Sm13481353",
# "host":"3"
# }
mydb = mysql.connector.connect(
    user="seyed",
    password="Sm13481353",
    host="localhost",
    database="telegram_bot",
    auth_plugin="mysql_native_password"
    )
mycursor = mydb.cursor()
query = "insert into question(update_id,chat_id) values (40,20)"
# mycursor.execute(query)
# mydb.commit()
mycursor.execute("select * from question")
users = mycursor.fetchall()
for user in users:
    print(user)
normal
{ "blob_id": "a29a904290cb733ac7b526a75e0c218b952e2266", "index": 4630, "step-1": "<mask token>\n", "step-2": "<mask token>\nmycursor.execute('select * from question')\n<mask token>\nfor user in users:\n print(user)\n", "step-3": "<mask token>\nmydb = mysql.connector.connect(user='seyed', password='Sm13481353', host=\n 'localhost', database='telegram_bot', auth_plugin='mysql_native_password')\nmycursor = mydb.cursor()\nquery = 'insert into question(update_id,chat_id) values (40,20)'\nmycursor.execute('select * from question')\nusers = mycursor.fetchall()\nfor user in users:\n print(user)\n", "step-4": "import mysql.connector\nmydb = mysql.connector.connect(user='seyed', password='Sm13481353', host=\n 'localhost', database='telegram_bot', auth_plugin='mysql_native_password')\nmycursor = mydb.cursor()\nquery = 'insert into question(update_id,chat_id) values (40,20)'\nmycursor.execute('select * from question')\nusers = mycursor.fetchall()\nfor user in users:\n print(user)\n", "step-5": "import mysql.connector\n# config = {\n# \"user\":\"root\",\n# \"password\":\"Sm13481353\",\n# \"host\":\"3\"\n# }\nmydb = mysql.connector.connect(\n user=\"seyed\",\n password=\"Sm13481353\",\n host=\"localhost\",\n database=\"telegram_bot\",\n auth_plugin=\"mysql_native_password\"\n )\nmycursor = mydb.cursor()\nquery = \"insert into question(update_id,chat_id) values (40,20)\"\n# mycursor.execute(query)\n# mydb.commit()\nmycursor.execute(\"select * from question\")\nusers = mycursor.fetchall()\nfor user in users:\n print(user)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import pandas as pd
import tensorflow as tf
import autokeras as ak
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf

from numpy import concatenate
from pandas import read_csv, DataFrame, concat
from sklearn.preprocessing import MinMaxScaler

np.set_printoptions(suppress=True)

EPOCHS = 10
BATCH_SIZE = 128

SHIFT_DAYS = 3
PRED_STEPS = 24*6 #48hr * 10분단위 예측
TIME_STEPS = SHIFT_DAYS*PRED_STEPS #hours step
DIMENSION = 15
MODEL_NUM = 10
CAPACITY = 89.7

TRAIN_RATIO = 0.6
VAL_RATIO = 0.2

START_DATE = '2021012899'
END_DATE = '2021042924'

SAVE_PATH = './data/'
SAVE_NAME = 'autoML_Test'


def getData():
    # power
    power_file = './data/power_20210129_20210429_preprocess_1hour'
    power_df = read_csv(power_file+'.csv', encoding='CP949', converters={'date':int})
    print(power_df.shape)

    # sensor
    sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
    sensor_df = read_csv(sensor_file+'.csv', encoding='CP949', converters={'date':int})
    sensor_df = sensor_df.sort_values('date')
    print(sensor_df.shape)

    # scale
    power_df.drop(['date'], axis=1, inplace=True)
    pow_scaler = MinMaxScaler(feature_range = (0, 1))
    scaled_pow = pow_scaler.fit_transform(power_df.values)
    power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns, index=list(power_df.index.values))

    weather_df = sensor_df.copy()
    weather_df.drop(['date'], axis=1, inplace=True)
    weather_scaler = MinMaxScaler(feature_range = (0, 1))#scale
    scaled_weather = weather_scaler.fit_transform(weather_df.values)
    weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.columns, index=list(weather_df.index.values))

    # JOIN
    df = weather_scaleddf.copy()

    # pow + weather + powY
    df.insert(0, 'pow', power_scaleddf.values, True)
    #df = df.iloc[0:-TIME_STEPS, :]
    #df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)
    #df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)

    #df.to_csv(SAVE_PATH+"total_scaled"+SAVE_NAME+".csv",mode='w',index=False, encoding='CP949')
    #display(df)

    return pow_scaler, df

pow_scaler, df = getData()
#display(df)

dataset = df
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]

data_x = data_train[
    [
        'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
        'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
        'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
        'dewpoint', 'outside_status'
    ]
].astype("float64")

data_x_val = validation_data[
    [
        'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
        'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
        'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
        'dewpoint', 'outside_status'
    ]
].astype("float64")

# Data with train data and the unseen data from subsequent time steps.
data_x_test = dataset[
    [
        'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
        'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
        'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
        'dewpoint', 'outside_status'
    ]
].astype("float64")

data_y = data_train["pow"].astype("float64")

data_y_val = validation_data["pow"].astype("float64")

print(data_x.shape) # (6549, 12)
print(data_y.shape) # (6549,)

predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(
    lookback=lookback,
    predict_from=predict_from,
    #predict_until=predict_until,
    #max_trials=1,
    objective="val_loss",
)
# Train the TimeSeriesForecaster with train data
clf.fit(
    x=data_x,
    y=data_y,
    validation_data=(data_x_val, data_y_val),
    batch_size=128,
    epochs=10,
)
# Predict with the best model(includes original training data).
predictions = clf.predict(data_x_test)
print(predictions.shape)
# Evaluate the best model with testing data.
print(clf.evaluate(data_x_val, data_y_val))
normal
{ "blob_id": "013189cd67cc44efd539c75ed235a0753d95f54e", "index": 2165, "step-1": "<mask token>\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\n<mask token>\n", "step-2": "<mask token>\nnp.set_printoptions(suppress=True)\n<mask token>\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\n<mask token>\nprint(data_x.shape)\nprint(data_y.shape)\n<mask token>\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\n<mask token>\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n", "step-3": "<mask token>\nnp.set_printoptions(suppress=True)\nEPOCHS = 10\nBATCH_SIZE = 128\nSHIFT_DAYS = 3\nPRED_STEPS = 24 * 6\nTIME_STEPS = SHIFT_DAYS * PRED_STEPS\nDIMENSION = 15\nMODEL_NUM = 10\nCAPACITY = 89.7\nTRAIN_RATIO = 0.6\nVAL_RATIO = 0.2\nSTART_DATE = '2021012899'\nEND_DATE = '2021042924'\nSAVE_PATH = './data/'\nSAVE_NAME = 'autoML_Test'\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 
1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\npow_scaler, df = getData()\ndataset = df\nval_split = int(len(dataset) * 0.7)\ndata_train = dataset[:val_split]\nvalidation_data = dataset[val_split:]\ndata_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',\n 'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_y = data_train['pow'].astype('float64')\ndata_y_val = validation_data['pow'].astype('float64')\nprint(data_x.shape)\nprint(data_y.shape)\npredict_from = 1\npredict_until = 10\nlookback = 3\nclf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,\n objective='val_loss')\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\npredictions = clf.predict(data_x_test)\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n", "step-4": "import pandas as pd\nimport tensorflow as tf\nimport autokeras as ak\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\nfrom numpy import concatenate\nfrom pandas import read_csv, DataFrame, concat\nfrom sklearn.preprocessing import MinMaxScaler\nnp.set_printoptions(suppress=True)\nEPOCHS = 10\nBATCH_SIZE = 128\nSHIFT_DAYS = 3\nPRED_STEPS = 24 * 6\nTIME_STEPS = SHIFT_DAYS * PRED_STEPS\nDIMENSION = 15\nMODEL_NUM = 10\nCAPACITY = 89.7\nTRAIN_RATIO = 0.6\nVAL_RATIO = 0.2\nSTART_DATE = '2021012899'\nEND_DATE = '2021042924'\nSAVE_PATH = './data/'\nSAVE_NAME = 'autoML_Test'\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n 
scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\npow_scaler, df = getData()\ndataset = df\nval_split = int(len(dataset) * 0.7)\ndata_train = dataset[:val_split]\nvalidation_data = dataset[val_split:]\ndata_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',\n 'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_y = data_train['pow'].astype('float64')\ndata_y_val = validation_data['pow'].astype('float64')\nprint(data_x.shape)\nprint(data_y.shape)\npredict_from = 1\npredict_until = 10\nlookback = 3\nclf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,\n objective='val_loss')\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\npredictions = clf.predict(data_x_test)\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n", "step-5": "import pandas as pd\r\nimport tensorflow as tf\r\nimport autokeras as ak\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom numpy import concatenate\r\nfrom pandas import read_csv, DataFrame, concat\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nnp.set_printoptions(suppress=True)\r\n\r\nEPOCHS = 10\r\nBATCH_SIZE = 128\r\n\r\nSHIFT_DAYS = 3\r\nPRED_STEPS = 24*6 #48hr * 10분단위 예측\r\nTIME_STEPS = SHIFT_DAYS*PRED_STEPS #hours step\r\nDIMENSION = 15\r\nMODEL_NUM = 10\r\nCAPACITY = 89.7\r\n\r\nTRAIN_RATIO = 0.6\r\nVAL_RATIO = 0.2\r\n\r\nSTART_DATE = '2021012899'\r\nEND_DATE = '2021042924'\r\n\r\nSAVE_PATH = './data/'\r\nSAVE_NAME = 'autoML_Test'\r\n\r\n\r\ndef getData():\r\n # power\r\n power_file = './data/power_20210129_20210429_preprocess_1hour'\r\n power_df = read_csv(power_file+'.csv', encoding='CP949', converters={'date':int})\r\n print(power_df.shape)\r\n \r\n # sensor \r\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\r\n sensor_df = read_csv(sensor_file+'.csv', encoding='CP949', converters={'date':int})\r\n sensor_df = sensor_df.sort_values('date')\r\n print(sensor_df.shape)\r\n\r\n # scale\r\n power_df.drop(['date'], axis=1, inplace=True)\r\n pow_scaler = MinMaxScaler(feature_range = (0, 1))\r\n scaled_pow = pow_scaler.fit_transform(power_df.values)\r\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns, index=list(power_df.index.values))\r\n\r\n weather_df = sensor_df.copy()\r\n weather_df.drop(['date'], axis=1, inplace=True)\r\n weather_scaler = MinMaxScaler(feature_range = (0, 1))#scale\r\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\r\n weather_scaleddf = pd.DataFrame(scaled_weather, 
columns=weather_df.columns, index=list(weather_df.index.values))\r\n\r\n # JOIN \r\n df = weather_scaleddf.copy()\r\n\r\n # pow + weather + powY\r\n df.insert(0, 'pow', power_scaleddf.values, True)\r\n #df = df.iloc[0:-TIME_STEPS, :]\r\n #df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)\r\n #df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)\r\n\r\n #df.to_csv(SAVE_PATH+\"total_scaled\"+SAVE_NAME+\".csv\",mode='w',index=False, encoding='CP949')\r\n #display(df) \r\n\r\n return pow_scaler, df\r\n\r\npow_scaler, df = getData()\r\n#display(df)\r\n\r\ndataset = df\r\nval_split = int(len(dataset) * 0.7)\r\ndata_train = dataset[:val_split]\r\nvalidation_data = dataset[val_split:]\r\n\r\ndata_x = data_train[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\ndata_x_val = validation_data[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\n# Data with train data and the unseen data from subsequent time steps.\r\ndata_x_test = dataset[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\ndata_y = data_train[\"pow\"].astype(\"float64\")\r\n\r\ndata_y_val = validation_data[\"pow\"].astype(\"float64\")\r\n\r\nprint(data_x.shape) # (6549, 12)\r\nprint(data_y.shape) # (6549,)\r\n\r\npredict_from = 1\r\npredict_until = 10\r\nlookback = 3\r\nclf = ak.TimeseriesForecaster(\r\n lookback=lookback,\r\n predict_from=predict_from,\r\n #predict_until=predict_until,\r\n #max_trials=1,\r\n objective=\"val_loss\",\r\n)\r\n# Train the TimeSeriesForecaster with train data\r\nclf.fit(\r\n x=data_x,\r\n y=data_y,\r\n validation_data=(data_x_val, data_y_val),\r\n batch_size=128,\r\n epochs=10,\r\n)\r\n# Predict with the best model(includes original training data).\r\npredictions = clf.predict(data_x_test)\r\nprint(predictions.shape)\r\n# Evaluate the best model with testing data.\r\nprint(clf.evaluate(data_x_val, data_y_val))", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/python3 """ list = list(range(97, 123) for (i in list): if (i % 2 == 0): i = (i - 32) """ for letter in "zYxWvUtSrQpOnMlKjIhGfEdCbA": print('{:s}'.format(letter), end = "")
normal
{ "blob_id": "55a061a1c0cd20e5ab7413c671bc03573de1bbdf", "index": 7754, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor letter in 'zYxWvUtSrQpOnMlKjIhGfEdCbA':\n print('{:s}'.format(letter), end='')\n", "step-3": "#!/usr/bin/python3\n\"\"\"\nlist = list(range(97, 123)\nfor (i in list):\n if (i % 2 == 0):\n i = (i - 32)\n\"\"\"\nfor letter in \"zYxWvUtSrQpOnMlKjIhGfEdCbA\":\n print('{:s}'.format(letter), end = \"\")\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import numpy as np import matplotlib.pyplot as plt from math import * from scipy.integrate import * from pylab import * from scipy.integrate import quad MHD = np.zeros((80, 90, 5), dtype=float) BGI = np.zeros((80, 90, 5), dtype=float) Fp = np.zeros((80), dtype=float) AngMHD = np.zeros((90,2), dtype=float) AngBGI = np.zeros((90,2), dtype=float) B0 = [0.5, 1.5, 3, 5, 10] V = [0.3, 0.3, 0.2, 0.1, 0.1] def PMHD(p, chi, b): return b**2/p*(1 +(sin(chi))**2) def xMHD(p, chi, b): return -b**2/p**2*sin(chi)*cos(chi) def PBGI(p, chi, b): Q = 0.7*p/b**0.57/sqrt(cos(chi)) if Q > 1: A = 1 else: A = Q return b**2/p*(A*(cos(chi))**2 + 0.01/sqrt(p)) def xBGI(p, chi, b): Q = 0.7*p/b**0.57/sqrt(cos(chi)) if Q > 1: A = 1 else: A = Q return A*b**2/p**2*sin(chi)*cos(chi) P0 = 0.3 Pend = 1 B12 = 4 dx = 0.0001 for i in range(450): xi0 = i/5 + 0.1 x0 = pi/180*xi0 P = P0 x = x0 while 0.7*P/B12**0.57/sqrt(cos(x)) < 2: P = P + PMHD(P, x, B12)*dx x = x + xMHD(P, x, B12)*dx gx = 180/pi*x iP = int(P/0.1) ix = int(gx) if iP < 80: MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1 for i in range(450): xi0 = i/5 + 0.1 x0 = pi/180*xi0 P = P0 x = x0 while 0.7*P/B12**0.57/sqrt(cos(x)) < 2: P = P + PBGI(P, x, B12)*dx x = x + xBGI(P, x, B12)*dx gx = 180/pi*x iP = int(P/0.1) ix = int(gx) if iP < 80: BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1 #for j in range(80): # for i in range(90): # Fp[j] = Fp[j] + PxiB[j, i, 0] # print(j/10, Fp[j]) for i in range(90): j = int(10*Pend) AngMHD[i,0] = i AngBGI[i,0] = i AngMHD[i,1] = MHD[j, i, 0] AngBGI[i,1] = BGI[j, i, 0] # print(i, PxiB[10, i, 0]) ymax = np.max(AngBGI) fig, ax = plt.subplots() x = np.linspace(0, 90) plt.xlim(1, 90) plt.ylim(0, 1.2*ymax) data1 = np.array(AngMHD) data2 = np.array(AngBGI) X1,Y1 = data1.T X2,Y2 = data2.T plt.scatter(X1,Y1, color = 'blue', s=15, label="MHD") plt.scatter(X2,Y2, color = 'red', s=15, label="BGI") plt.title('$P_0$ = '+str(P0)+', P = '+str(Pend)+', $B_{12}$ = '+str(B12)+'') plt.grid(True,which="both", ls="-") plt.grid(True,which="both", ls="-") plt.xlabel('$\chi$') #plt.ylabel('$\lambda g(x_{0})$') plt.legend() plt.show() #fig, ax = plt.subplots() #x = np.linspace(0, 1) #plt.xlim(0.0001, 1.0) #plt.ylim(0, 0.1) #plt.plot(x, x**2*(cos(ch)*(1 - x**2) + 1/2*sin(ch)*(x - x**3))**3, label="fitting") #plt.title(''+str(PSR)+', $n_{\pm}$ (P = '+str(P)+', $B_{12}$ = '+str(B12)+', $\chi$ = '+str(chi)+'$^{\circ}$), $\lambda = 92$') #plt.grid(True,which="both", ls="-") #plt.grid(True,which="both", ls="-") ##ax.vlines(xcr, 0, 8, color = 'black', linewidth = 1.5, linestyle = '--') #plt.xlabel('$r_{0}/R_0$') #plt.ylabel('$n_{\pm}$') #plt.legend() #plt.show()
normal
{ "blob_id": "660334be611c30397c2f33890e1bca1fc43bd01f", "index": 2420, "step-1": "<mask token>\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\n<mask token>\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\ndef PBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\n<mask token>\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PMHD(P, x, B12) * dx\n x = x + xMHD(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PBGI(P, x, B12) * dx\n x = x + xBGI(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\nfor i in range(90):\n j = int(10 * Pend)\n AngMHD[i, 0] = i\n AngBGI[i, 0] = i\n AngMHD[i, 1] = MHD[j, i, 0]\n AngBGI[i, 1] = BGI[j, i, 0]\n<mask token>\nplt.xlim(1, 90)\nplt.ylim(0, 1.2 * ymax)\n<mask token>\nplt.scatter(X1, Y1, color='blue', s=15, label='MHD')\nplt.scatter(X2, Y2, color='red', s=15, label='BGI')\nplt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +\n str(B12) + '')\nplt.grid(True, which='both', ls='-')\nplt.grid(True, which='both', ls='-')\nplt.xlabel('$\\\\chi$')\nplt.legend()\nplt.show()\n", "step-3": "<mask token>\nMHD = np.zeros((80, 90, 5), dtype=float)\nBGI = np.zeros((80, 90, 5), dtype=float)\nFp = np.zeros(80, dtype=float)\nAngMHD = np.zeros((90, 2), dtype=float)\nAngBGI = np.zeros((90, 2), dtype=float)\nB0 = [0.5, 1.5, 3, 5, 10]\nV = [0.3, 0.3, 0.2, 0.1, 0.1]\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\ndef PBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\nP0 = 0.3\nPend = 1\nB12 = 4\ndx = 0.0001\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PMHD(P, x, B12) * dx\n x = x + xMHD(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PBGI(P, x, B12) * dx\n x = x + xBGI(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\nfor i in range(90):\n j = int(10 * Pend)\n AngMHD[i, 0] = i\n AngBGI[i, 0] = i\n 
AngMHD[i, 1] = MHD[j, i, 0]\n AngBGI[i, 1] = BGI[j, i, 0]\nymax = np.max(AngBGI)\nfig, ax = plt.subplots()\nx = np.linspace(0, 90)\nplt.xlim(1, 90)\nplt.ylim(0, 1.2 * ymax)\ndata1 = np.array(AngMHD)\ndata2 = np.array(AngBGI)\nX1, Y1 = data1.T\nX2, Y2 = data2.T\nplt.scatter(X1, Y1, color='blue', s=15, label='MHD')\nplt.scatter(X2, Y2, color='red', s=15, label='BGI')\nplt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +\n str(B12) + '')\nplt.grid(True, which='both', ls='-')\nplt.grid(True, which='both', ls='-')\nplt.xlabel('$\\\\chi$')\nplt.legend()\nplt.show()\n", "step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom math import *\nfrom scipy.integrate import *\nfrom pylab import *\nfrom scipy.integrate import quad\nMHD = np.zeros((80, 90, 5), dtype=float)\nBGI = np.zeros((80, 90, 5), dtype=float)\nFp = np.zeros(80, dtype=float)\nAngMHD = np.zeros((90, 2), dtype=float)\nAngBGI = np.zeros((90, 2), dtype=float)\nB0 = [0.5, 1.5, 3, 5, 10]\nV = [0.3, 0.3, 0.2, 0.1, 0.1]\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\ndef PBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\nP0 = 0.3\nPend = 1\nB12 = 4\ndx = 0.0001\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PMHD(P, x, B12) * dx\n x = x + xMHD(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PBGI(P, x, B12) * dx\n x = x + xBGI(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\nfor i in range(90):\n j = int(10 * Pend)\n AngMHD[i, 0] = i\n AngBGI[i, 0] = i\n AngMHD[i, 1] = MHD[j, i, 0]\n AngBGI[i, 1] = BGI[j, i, 0]\nymax = np.max(AngBGI)\nfig, ax = plt.subplots()\nx = np.linspace(0, 90)\nplt.xlim(1, 90)\nplt.ylim(0, 1.2 * ymax)\ndata1 = np.array(AngMHD)\ndata2 = np.array(AngBGI)\nX1, Y1 = data1.T\nX2, Y2 = data2.T\nplt.scatter(X1, Y1, color='blue', s=15, label='MHD')\nplt.scatter(X2, Y2, color='red', s=15, label='BGI')\nplt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +\n str(B12) + '')\nplt.grid(True, which='both', ls='-')\nplt.grid(True, which='both', ls='-')\nplt.xlabel('$\\\\chi$')\nplt.legend()\nplt.show()\n", "step-5": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import * \r\nfrom scipy.integrate import *\r\nfrom pylab import * \r\nfrom scipy.integrate import quad\r\n\r\n\r\nMHD = np.zeros((80, 90, 5), dtype=float)\r\nBGI = np.zeros((80, 90, 5), dtype=float)\r\nFp = np.zeros((80), dtype=float) \r\nAngMHD = np.zeros((90,2), dtype=float)\r\nAngBGI = np.zeros((90,2), dtype=float) \r\nB0 = [0.5, 1.5, 3, 5, 10]\r\nV = [0.3, 0.3, 0.2, 0.1, 0.1]\r\n\r\n\r\ndef PMHD(p, chi, b):\r\n return b**2/p*(1 +(sin(chi))**2)\r\n\r\ndef xMHD(p, chi, b):\r\n return -b**2/p**2*sin(chi)*cos(chi)\r\n\r\ndef PBGI(p, chi, b):\r\n Q = 0.7*p/b**0.57/sqrt(cos(chi))\r\n if Q > 1:\r\n A = 1\r\n else:\r\n A = Q\r\n return b**2/p*(A*(cos(chi))**2 + 
0.01/sqrt(p))\r\n\r\ndef xBGI(p, chi, b):\r\n Q = 0.7*p/b**0.57/sqrt(cos(chi))\r\n if Q > 1:\r\n A = 1\r\n else:\r\n A = Q\r\n return A*b**2/p**2*sin(chi)*cos(chi)\r\n\r\nP0 = 0.3\r\nPend = 1\r\nB12 = 4\r\n\r\ndx = 0.0001\r\n\r\n\r\nfor i in range(450):\r\n xi0 = i/5 + 0.1\r\n x0 = pi/180*xi0\r\n P = P0\r\n x = x0\r\n while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:\r\n P = P + PMHD(P, x, B12)*dx\r\n x = x + xMHD(P, x, B12)*dx\r\n gx = 180/pi*x\r\n iP = int(P/0.1)\r\n ix = int(gx)\r\n if iP < 80:\r\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\r\n\r\n\r\nfor i in range(450):\r\n xi0 = i/5 + 0.1\r\n x0 = pi/180*xi0\r\n P = P0\r\n x = x0\r\n while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:\r\n P = P + PBGI(P, x, B12)*dx\r\n x = x + xBGI(P, x, B12)*dx\r\n gx = 180/pi*x\r\n iP = int(P/0.1)\r\n ix = int(gx)\r\n if iP < 80:\r\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\r\n\r\n#for j in range(80):\r\n# for i in range(90):\r\n# Fp[j] = Fp[j] + PxiB[j, i, 0] \r\n# print(j/10, Fp[j]) \r\n\r\n\r\nfor i in range(90):\r\n j = int(10*Pend)\r\n AngMHD[i,0] = i\r\n AngBGI[i,0] = i\r\n AngMHD[i,1] = MHD[j, i, 0]\r\n AngBGI[i,1] = BGI[j, i, 0]\r\n# print(i, PxiB[10, i, 0])\r\n\r\n\r\nymax = np.max(AngBGI)\r\n\r\nfig, ax = plt.subplots()\r\nx = np.linspace(0, 90)\r\nplt.xlim(1, 90)\r\nplt.ylim(0, 1.2*ymax)\r\ndata1 = np.array(AngMHD)\r\ndata2 = np.array(AngBGI)\r\nX1,Y1 = data1.T\r\nX2,Y2 = data2.T\r\nplt.scatter(X1,Y1, color = 'blue', s=15, label=\"MHD\")\r\nplt.scatter(X2,Y2, color = 'red', s=15, label=\"BGI\")\r\nplt.title('$P_0$ = '+str(P0)+', P = '+str(Pend)+', $B_{12}$ = '+str(B12)+'')\r\nplt.grid(True,which=\"both\", ls=\"-\")\r\nplt.grid(True,which=\"both\", ls=\"-\")\r\nplt.xlabel('$\\chi$')\r\n#plt.ylabel('$\\lambda g(x_{0})$')\r\nplt.legend()\r\nplt.show() \r\n\r\n\r\n#fig, ax = plt.subplots()\r\n#x = np.linspace(0, 1)\r\n#plt.xlim(0.0001, 1.0)\r\n#plt.ylim(0, 0.1)\r\n#plt.plot(x, x**2*(cos(ch)*(1 - x**2) + 1/2*sin(ch)*(x - x**3))**3, label=\"fitting\")\r\n#plt.title(''+str(PSR)+', $n_{\\pm}$ (P = '+str(P)+', $B_{12}$ = '+str(B12)+', $\\chi$ = '+str(chi)+'$^{\\circ}$), $\\lambda = 92$')\r\n#plt.grid(True,which=\"both\", ls=\"-\")\r\n#plt.grid(True,which=\"both\", ls=\"-\")\r\n##ax.vlines(xcr, 0, 8, color = 'black', linewidth = 1.5, linestyle = '--')\r\n#plt.xlabel('$r_{0}/R_0$')\r\n#plt.ylabel('$n_{\\pm}$')\r\n#plt.legend()\r\n#plt.show() ", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
from core import Postgresdb db = Postgresdb() print(db)
normal
{ "blob_id": "962a9781e4f2ad787dd695896b6455c9b336603a", "index": 7178, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(db)\n", "step-3": "<mask token>\ndb = Postgresdb()\nprint(db)\n", "step-4": "from core import Postgresdb\ndb = Postgresdb()\nprint(db)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch from datasets import concatenate_datasets from datasets.arrow_dataset import Dataset from transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import ( ClassificationDatasetPreprocessor, ) from transformers import PreTrainedModel from transformers.tokenization_utils import BatchEncoding class Augmentor: def __init__(self) -> None: self.__AUGMENTATION_VALID__ = "VALID" def augment( self, dataset: Dataset, preprocessor: ClassificationDatasetPreprocessor, num_trial: int = 2, discriminator: PreTrainedModel = None, threshold: float = 0.8, ) -> BatchEncoding: augmented_samples = None # type: Optional[BatchEncoding] if discriminator is not None and preprocessor is None: raise Exception("To use discriminator, preprocessor should be required.") for _ in range(num_trial): original = dataset.shuffle() augmented = self.generate(original, preprocessor) if discriminator is not None and preprocessor is not None: matched, log = self.discriminate(discriminator, preprocessor, original, augmented, threshold) def unmatched_to_invalid(example: Dict[str, Any], index: int) -> Dict[str, Any]: example[self.__AUGMENTATION_VALID__] = True if index in matched else False return example augmented = augmented.map(unmatched_to_invalid, with_indices=True) augmented = augmented.filter(lambda e: e[self.__AUGMENTATION_VALID__]) if len(augmented) == 0: continue if augmented_samples is None: augmented_samples = augmented else: augmented_samples = concatenate_datasets([augmented_samples, augmented]) if len(dataset) <= len(augmented_samples): augmented_samples = augmented_samples.select(range(len(dataset))) break if augmented_samples is not None: augmented_samples = augmented_samples.remove_columns([self.__AUGMENTATION_VALID__]) augmented_samples = augmented_samples.flatten_indices() return augmented_samples def generate(self, dataset: Dataset, preprocessor: ClassificationDatasetPreprocessor) -> BatchEncoding: raise NotImplementedError("Augmentor subclass should implement augment_sample.") def discriminate( self, model: PreTrainedModel, preprocessor: ClassificationDatasetPreprocessor, original: Dataset, augmented: Dataset, threshold: float, ) -> Tuple[List[int], List[Dict[str, Union[str, float]]]]: formatted_original = preprocessor.format(original) original_scores = self.predict(model, formatted_original) formatted_augmented = preprocessor.format(augmented) augmented_scores = self.predict(model, formatted_augmented) matched = [] logs = [] for i, original, original_score, augmented, augmented_score in zip( range(len(original)), original, original_scores, augmented, augmented_scores ): if original_score["label"] == augmented_score["label"] and augmented_score["score"] >= threshold: matched.append(i) logs.append( { "original": original[preprocessor.input_column], "original_label": original_score["label"], "original_score": original_score["score"], "augmented": augmented[preprocessor.input_column], "augmented_label": augmented_score["label"], "augmented_score": augmented_score["score"], } ) return (matched, logs) def predict( self, model: PreTrainedModel, examples: Dataset, ) -> List[Dict[str, Union[int, float]]]: model.eval() device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) with torch.no_grad(): # type: ignore input_ids = examples["input_ids"].to(device) if "token_type_ids" in examples.column_names: token_type_ids = examples["token_type_ids"].to(device) outputs = model(input_ids, 
token_type_ids=token_type_ids) else: outputs = model(input_ids) predictions = outputs[0].cpu().numpy() scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims=True) return [{"label": model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores]
normal
{ "blob_id": "4a88ce640b6680df925288b44232cf43d585c11c", "index": 669, "step-1": "<mask token>\n\n\nclass Augmentor:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Augmentor:\n\n def __init__(self) ->None:\n self.__AUGMENTATION_VALID__ = 'VALID'\n\n def augment(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:\n PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:\n augmented_samples = None\n if discriminator is not None and preprocessor is None:\n raise Exception(\n 'To use discriminator, preprocessor should be required.')\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator,\n preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int\n ) ->Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__\n ] = True if index in matched else False\n return example\n augmented = augmented.map(unmatched_to_invalid,\n with_indices=True)\n augmented = augmented.filter(lambda e: e[self.\n __AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples,\n augmented])\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(\n dataset)))\n break\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.\n __AUGMENTATION_VALID__])\n augmented_samples = augmented_samples.flatten_indices()\n return augmented_samples\n <mask token>\n\n def discriminate(self, model: PreTrainedModel, preprocessor:\n ClassificationDatasetPreprocessor, original: Dataset, augmented:\n Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[\n str, float]]]]:\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n formatted_augmented = preprocessor.format(augmented)\n augmented_scores = self.predict(model, formatted_augmented)\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented,\n augmented_scores):\n if original_score['label'] == augmented_score['label'\n ] and augmented_score['score'] >= threshold:\n matched.append(i)\n logs.append({'original': original[preprocessor.input_column],\n 'original_label': original_score['label'], 'original_score':\n original_score['score'], 'augmented': augmented[\n preprocessor.input_column], 'augmented_label':\n augmented_score['label'], 'augmented_score':\n augmented_score['score']})\n return matched, logs\n\n def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict\n [str, Union[int, float]]]:\n model.eval()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n with torch.no_grad():\n input_ids = examples['input_ids'].to(device)\n if 'token_type_ids' in examples.column_names:\n token_type_ids = examples['token_type_ids'].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n predictions = outputs[0].cpu().numpy()\n scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims\n =True)\n return [{'label': model.config.id2label[item.argmax()], 'score':\n 
item.max().item()} for item in scores]\n", "step-3": "<mask token>\n\n\nclass Augmentor:\n\n def __init__(self) ->None:\n self.__AUGMENTATION_VALID__ = 'VALID'\n\n def augment(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:\n PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:\n augmented_samples = None\n if discriminator is not None and preprocessor is None:\n raise Exception(\n 'To use discriminator, preprocessor should be required.')\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator,\n preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int\n ) ->Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__\n ] = True if index in matched else False\n return example\n augmented = augmented.map(unmatched_to_invalid,\n with_indices=True)\n augmented = augmented.filter(lambda e: e[self.\n __AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples,\n augmented])\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(\n dataset)))\n break\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.\n __AUGMENTATION_VALID__])\n augmented_samples = augmented_samples.flatten_indices()\n return augmented_samples\n\n def generate(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor) ->BatchEncoding:\n raise NotImplementedError(\n 'Augmentor subclass should implement augment_sample.')\n\n def discriminate(self, model: PreTrainedModel, preprocessor:\n ClassificationDatasetPreprocessor, original: Dataset, augmented:\n Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[\n str, float]]]]:\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n formatted_augmented = preprocessor.format(augmented)\n augmented_scores = self.predict(model, formatted_augmented)\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented,\n augmented_scores):\n if original_score['label'] == augmented_score['label'\n ] and augmented_score['score'] >= threshold:\n matched.append(i)\n logs.append({'original': original[preprocessor.input_column],\n 'original_label': original_score['label'], 'original_score':\n original_score['score'], 'augmented': augmented[\n preprocessor.input_column], 'augmented_label':\n augmented_score['label'], 'augmented_score':\n augmented_score['score']})\n return matched, logs\n\n def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict\n [str, Union[int, float]]]:\n model.eval()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n with torch.no_grad():\n input_ids = examples['input_ids'].to(device)\n if 'token_type_ids' in examples.column_names:\n token_type_ids = examples['token_type_ids'].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n predictions = outputs[0].cpu().numpy()\n scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims\n =True)\n return [{'label': model.config.id2label[item.argmax()], 
'score':\n item.max().item()} for item in scores]\n", "step-4": "from typing import Any, Dict, List, Optional, Tuple, Union\nimport numpy as np\nimport torch\nfrom datasets import concatenate_datasets\nfrom datasets.arrow_dataset import Dataset\nfrom transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import ClassificationDatasetPreprocessor\nfrom transformers import PreTrainedModel\nfrom transformers.tokenization_utils import BatchEncoding\n\n\nclass Augmentor:\n\n def __init__(self) ->None:\n self.__AUGMENTATION_VALID__ = 'VALID'\n\n def augment(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:\n PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:\n augmented_samples = None\n if discriminator is not None and preprocessor is None:\n raise Exception(\n 'To use discriminator, preprocessor should be required.')\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator,\n preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int\n ) ->Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__\n ] = True if index in matched else False\n return example\n augmented = augmented.map(unmatched_to_invalid,\n with_indices=True)\n augmented = augmented.filter(lambda e: e[self.\n __AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples,\n augmented])\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(\n dataset)))\n break\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.\n __AUGMENTATION_VALID__])\n augmented_samples = augmented_samples.flatten_indices()\n return augmented_samples\n\n def generate(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor) ->BatchEncoding:\n raise NotImplementedError(\n 'Augmentor subclass should implement augment_sample.')\n\n def discriminate(self, model: PreTrainedModel, preprocessor:\n ClassificationDatasetPreprocessor, original: Dataset, augmented:\n Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[\n str, float]]]]:\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n formatted_augmented = preprocessor.format(augmented)\n augmented_scores = self.predict(model, formatted_augmented)\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented,\n augmented_scores):\n if original_score['label'] == augmented_score['label'\n ] and augmented_score['score'] >= threshold:\n matched.append(i)\n logs.append({'original': original[preprocessor.input_column],\n 'original_label': original_score['label'], 'original_score':\n original_score['score'], 'augmented': augmented[\n preprocessor.input_column], 'augmented_label':\n augmented_score['label'], 'augmented_score':\n augmented_score['score']})\n return matched, logs\n\n def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict\n [str, Union[int, float]]]:\n model.eval()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n with torch.no_grad():\n input_ids = 
examples['input_ids'].to(device)\n if 'token_type_ids' in examples.column_names:\n token_type_ids = examples['token_type_ids'].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n predictions = outputs[0].cpu().numpy()\n scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims\n =True)\n return [{'label': model.config.id2label[item.argmax()], 'score':\n item.max().item()} for item in scores]\n", "step-5": "from typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom datasets import concatenate_datasets\nfrom datasets.arrow_dataset import Dataset\nfrom transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import (\n ClassificationDatasetPreprocessor,\n)\nfrom transformers import PreTrainedModel\nfrom transformers.tokenization_utils import BatchEncoding\n\n\nclass Augmentor:\n def __init__(self) -> None:\n self.__AUGMENTATION_VALID__ = \"VALID\"\n\n def augment(\n self,\n dataset: Dataset,\n preprocessor: ClassificationDatasetPreprocessor,\n num_trial: int = 2,\n discriminator: PreTrainedModel = None,\n threshold: float = 0.8,\n ) -> BatchEncoding:\n augmented_samples = None # type: Optional[BatchEncoding]\n\n if discriminator is not None and preprocessor is None:\n raise Exception(\"To use discriminator, preprocessor should be required.\")\n\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator, preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int) -> Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__] = True if index in matched else False\n return example\n\n augmented = augmented.map(unmatched_to_invalid, with_indices=True)\n\n augmented = augmented.filter(lambda e: e[self.__AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples, augmented])\n\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(dataset)))\n break\n\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.__AUGMENTATION_VALID__])\n augmented_samples = augmented_samples.flatten_indices()\n\n return augmented_samples\n\n def generate(self, dataset: Dataset, preprocessor: ClassificationDatasetPreprocessor) -> BatchEncoding:\n raise NotImplementedError(\"Augmentor subclass should implement augment_sample.\")\n\n def discriminate(\n self,\n model: PreTrainedModel,\n preprocessor: ClassificationDatasetPreprocessor,\n original: Dataset,\n augmented: Dataset,\n threshold: float,\n ) -> Tuple[List[int], List[Dict[str, Union[str, float]]]]:\n\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n\n formatted_augmented = preprocessor.format(augmented)\n augmented_scores = self.predict(model, formatted_augmented)\n\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented, augmented_scores\n ):\n if original_score[\"label\"] == augmented_score[\"label\"] and augmented_score[\"score\"] >= threshold:\n matched.append(i)\n\n logs.append(\n {\n \"original\": original[preprocessor.input_column],\n 
\"original_label\": original_score[\"label\"],\n \"original_score\": original_score[\"score\"],\n \"augmented\": augmented[preprocessor.input_column],\n \"augmented_label\": augmented_score[\"label\"],\n \"augmented_score\": augmented_score[\"score\"],\n }\n )\n\n return (matched, logs)\n\n def predict(\n self,\n model: PreTrainedModel,\n examples: Dataset,\n ) -> List[Dict[str, Union[int, float]]]:\n model.eval()\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n model.to(device)\n with torch.no_grad(): # type: ignore\n input_ids = examples[\"input_ids\"].to(device)\n if \"token_type_ids\" in examples.column_names:\n token_type_ids = examples[\"token_type_ids\"].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n\n predictions = outputs[0].cpu().numpy()\n\n scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims=True)\n return [{\"label\": model.config.id2label[item.argmax()], \"score\": item.max().item()} for item in scores]\n", "step-ids": [ 1, 5, 6, 7, 8 ] }
[ 1, 5, 6, 7, 8 ]
#!flask/bin/python import os, json import requests SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY', default=None) FROM_EMAIL = os.environ.get('FROM_EMAIL', default=None) TO_EMAIL = os.environ.get('TO_EMAIL', default=None) if not SENDGRID_API_KEY: raise ValueError("Need to set Sendgrid API Key (SENDGRID_API_KEY)") if not FROM_EMAIL or not TO_EMAIL: raise ValueError("Need to set email info (FROM_EMAIL and TO_EMAIL") sendgrid_url = 'https://api.sendgrid.com/v3/mail/send' def build_request_body(email): from_email = email['email'] name = email['name'] subject = email['subject'] body = email['body'] if not from_email: from_email = FROM_EMAIL if not name: name = "Anonymous" if not subject: subject = "Portfolio contact form message" req_body = json.dumps({ "personalizations": [ { "to": [ { "email": TO_EMAIL } ], "subject": subject } ], "from": { "email": from_email, "name": name }, "content": [ { "type": "text/plain", "value": body } ] }) return req_body def send_mail(email): headers = { "Authorization": f"Bearer {SENDGRID_API_KEY}", "Content-Type": "application/json" } email_body = build_request_body(email) response = requests.post(sendgrid_url, headers=headers, data=email_body) print(response.text) return response
normal
{ "blob_id": "cb29ee8687b469923896ceb7d5a6cd7f54b2c34e", "index": 6207, "step-1": "<mask token>\n\n\ndef build_request_body(email):\n from_email = email['email']\n name = email['name']\n subject = email['subject']\n body = email['body']\n if not from_email:\n from_email = FROM_EMAIL\n if not name:\n name = 'Anonymous'\n if not subject:\n subject = 'Portfolio contact form message'\n req_body = json.dumps({'personalizations': [{'to': [{'email': TO_EMAIL}\n ], 'subject': subject}], 'from': {'email': from_email, 'name': name\n }, 'content': [{'type': 'text/plain', 'value': body}]})\n return req_body\n\n\ndef send_mail(email):\n headers = {'Authorization': f'Bearer {SENDGRID_API_KEY}',\n 'Content-Type': 'application/json'}\n email_body = build_request_body(email)\n response = requests.post(sendgrid_url, headers=headers, data=email_body)\n print(response.text)\n return response\n", "step-2": "<mask token>\nif not SENDGRID_API_KEY:\n raise ValueError('Need to set Sendgrid API Key (SENDGRID_API_KEY)')\nif not FROM_EMAIL or not TO_EMAIL:\n raise ValueError('Need to set email info (FROM_EMAIL and TO_EMAIL')\n<mask token>\n\n\ndef build_request_body(email):\n from_email = email['email']\n name = email['name']\n subject = email['subject']\n body = email['body']\n if not from_email:\n from_email = FROM_EMAIL\n if not name:\n name = 'Anonymous'\n if not subject:\n subject = 'Portfolio contact form message'\n req_body = json.dumps({'personalizations': [{'to': [{'email': TO_EMAIL}\n ], 'subject': subject}], 'from': {'email': from_email, 'name': name\n }, 'content': [{'type': 'text/plain', 'value': body}]})\n return req_body\n\n\ndef send_mail(email):\n headers = {'Authorization': f'Bearer {SENDGRID_API_KEY}',\n 'Content-Type': 'application/json'}\n email_body = build_request_body(email)\n response = requests.post(sendgrid_url, headers=headers, data=email_body)\n print(response.text)\n return response\n", "step-3": "<mask token>\nSENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY', default=None)\nFROM_EMAIL = os.environ.get('FROM_EMAIL', default=None)\nTO_EMAIL = os.environ.get('TO_EMAIL', default=None)\nif not SENDGRID_API_KEY:\n raise ValueError('Need to set Sendgrid API Key (SENDGRID_API_KEY)')\nif not FROM_EMAIL or not TO_EMAIL:\n raise ValueError('Need to set email info (FROM_EMAIL and TO_EMAIL')\nsendgrid_url = 'https://api.sendgrid.com/v3/mail/send'\n\n\ndef build_request_body(email):\n from_email = email['email']\n name = email['name']\n subject = email['subject']\n body = email['body']\n if not from_email:\n from_email = FROM_EMAIL\n if not name:\n name = 'Anonymous'\n if not subject:\n subject = 'Portfolio contact form message'\n req_body = json.dumps({'personalizations': [{'to': [{'email': TO_EMAIL}\n ], 'subject': subject}], 'from': {'email': from_email, 'name': name\n }, 'content': [{'type': 'text/plain', 'value': body}]})\n return req_body\n\n\ndef send_mail(email):\n headers = {'Authorization': f'Bearer {SENDGRID_API_KEY}',\n 'Content-Type': 'application/json'}\n email_body = build_request_body(email)\n response = requests.post(sendgrid_url, headers=headers, data=email_body)\n print(response.text)\n return response\n", "step-4": "import os, json\nimport requests\nSENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY', default=None)\nFROM_EMAIL = os.environ.get('FROM_EMAIL', default=None)\nTO_EMAIL = os.environ.get('TO_EMAIL', default=None)\nif not SENDGRID_API_KEY:\n raise ValueError('Need to set Sendgrid API Key (SENDGRID_API_KEY)')\nif not FROM_EMAIL or not TO_EMAIL:\n raise 
ValueError('Need to set email info (FROM_EMAIL and TO_EMAIL')\nsendgrid_url = 'https://api.sendgrid.com/v3/mail/send'\n\n\ndef build_request_body(email):\n from_email = email['email']\n name = email['name']\n subject = email['subject']\n body = email['body']\n if not from_email:\n from_email = FROM_EMAIL\n if not name:\n name = 'Anonymous'\n if not subject:\n subject = 'Portfolio contact form message'\n req_body = json.dumps({'personalizations': [{'to': [{'email': TO_EMAIL}\n ], 'subject': subject}], 'from': {'email': from_email, 'name': name\n }, 'content': [{'type': 'text/plain', 'value': body}]})\n return req_body\n\n\ndef send_mail(email):\n headers = {'Authorization': f'Bearer {SENDGRID_API_KEY}',\n 'Content-Type': 'application/json'}\n email_body = build_request_body(email)\n response = requests.post(sendgrid_url, headers=headers, data=email_body)\n print(response.text)\n return response\n", "step-5": "#!flask/bin/python\nimport os, json\nimport requests\n\nSENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY', default=None)\nFROM_EMAIL = os.environ.get('FROM_EMAIL', default=None)\nTO_EMAIL = os.environ.get('TO_EMAIL', default=None)\n\nif not SENDGRID_API_KEY:\n raise ValueError(\"Need to set Sendgrid API Key (SENDGRID_API_KEY)\")\n\nif not FROM_EMAIL or not TO_EMAIL:\n raise ValueError(\"Need to set email info (FROM_EMAIL and TO_EMAIL\")\n\nsendgrid_url = 'https://api.sendgrid.com/v3/mail/send'\n\ndef build_request_body(email):\n from_email = email['email']\n name = email['name']\n subject = email['subject']\n body = email['body']\n if not from_email:\n from_email = FROM_EMAIL\n if not name:\n name = \"Anonymous\"\n if not subject:\n subject = \"Portfolio contact form message\"\n req_body = json.dumps({\n \"personalizations\": [\n {\n \"to\": [\n {\n \"email\": TO_EMAIL\n }\n ],\n \"subject\": subject\n }\n ],\n \"from\": {\n \"email\": from_email,\n \"name\": name\n },\n \"content\": [\n {\n \"type\": \"text/plain\",\n \"value\": body\n }\n ]\n })\n return req_body\n\ndef send_mail(email):\n headers = {\n \"Authorization\": f\"Bearer {SENDGRID_API_KEY}\",\n \"Content-Type\": \"application/json\"\n }\n email_body = build_request_body(email)\n response = requests.post(sendgrid_url, headers=headers, data=email_body)\n print(response.text)\n return response\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from PIL import Image, ImageFilter import numpy as np import glob from numpy import array import matplotlib.pyplot as plt from skimage import morphology import scipy.ndimage def sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True): if (display1): new_list = [] new_list.append(stack) new_list.append(stack) new_list.append(stack) new_list.append(stack) sample_stack(new_list, 2, 2, 0, 1, False) else: fig,ax = plt.subplots(rows,cols,figsize=[12,12]) for i in range((rows*cols)): ind = start_with + i*show_every ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind) ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray') ax[int(i/rows),int(i % rows)].axis('off') plt.show() """ datapath = "jpg_images/" img0 = Image.open("jpg_images/maskedimage" + str(0) + ".jpg") counter = 0 img1 = [] for f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'): path = "jpg_images/maskedimage" + str(counter) + ".jpg" img0 = Image.open(path).convert('L') img1.append(array(img0)) counter += 1 print("Counter: " + str(counter)) imgs_to_process_orig = np.stack([s for s in img1]) """ id = 2 imgs = np.load("/Users/paulmccabe/Desktop/Segmentation Project/" + "justmask_%d.npy" % (id)) counter = 0 print("Saving as jpg Images...") for img in imgs: scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img) counter += 1 counter = 0 #print("Re-Importing jpg Images...") #for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'): # path = "jpg_images/maskedimage" + str(counter) + ".jpg" # img0 = Image.open(path).convert('L') # img1.append(array(img0)) # counter += 1 imgs[imgs == 1] = 255 list = [] for img in imgs: PIL_img = Image.fromarray(img.astype('uint8')) PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES) np_img = array(PIL_edge) dilation = morphology.dilation(np_img, np.ones([4,4])) list.append(dilation) imgs_after_processing = np.stack([s for s in list]) np.save("/Users/paulmccabe/Desktop/Segmentation Project" + "/justedge_%d.npy" % (id), imgs_after_processing[:284]) #sample_stack(np_img)
normal
{ "blob_id": "371c1c9e3ccf7dae35d435bdb013e0462f3add5d", "index": 4831, "step-1": "<mask token>\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\nprint('Saving as jpg Images...')\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' +\n '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\n<mask token>\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4, 4]))\n list.append(dilation)\n<mask token>\nnp.save('/Users/paulmccabe/Desktop/Segmentation Project' + \n '/justedge_%d.npy' % id, imgs_after_processing[:284])\n", "step-3": "<mask token>\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\nid = 2\nimgs = np.load('/Users/paulmccabe/Desktop/Segmentation Project/' + \n 'justmask_%d.npy' % id)\ncounter = 0\nprint('Saving as jpg Images...')\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' +\n '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\ncounter = 0\nimgs[imgs == 1] = 255\nlist = []\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4, 4]))\n list.append(dilation)\nimgs_after_processing = np.stack([s for s in list])\nnp.save('/Users/paulmccabe/Desktop/Segmentation Project' + \n '/justedge_%d.npy' % id, imgs_after_processing[:284])\n", "step-4": "from PIL import Image, ImageFilter\nimport numpy as np\nimport glob\nfrom numpy import array\nimport matplotlib.pyplot as plt\nfrom skimage import morphology\nimport scipy.ndimage\n\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, 
show_every=1,\n display1=True):\n if display1:\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig, ax = plt.subplots(rows, cols, figsize=[12, 12])\n for i in range(rows * cols):\n ind = start_with + i * show_every\n ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)\n ax[int(i / rows), int(i % rows)].imshow(stack[ind], cmap='gray')\n ax[int(i / rows), int(i % rows)].axis('off')\n plt.show()\n\n\n<mask token>\nid = 2\nimgs = np.load('/Users/paulmccabe/Desktop/Segmentation Project/' + \n 'justmask_%d.npy' % id)\ncounter = 0\nprint('Saving as jpg Images...')\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' +\n '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\ncounter = 0\nimgs[imgs == 1] = 255\nlist = []\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4, 4]))\n list.append(dilation)\nimgs_after_processing = np.stack([s for s in list])\nnp.save('/Users/paulmccabe/Desktop/Segmentation Project' + \n '/justedge_%d.npy' % id, imgs_after_processing[:284])\n", "step-5": "from PIL import Image, ImageFilter\nimport numpy as np\nimport glob\nfrom numpy import array\nimport matplotlib.pyplot as plt\nfrom skimage import morphology\nimport scipy.ndimage\n\ndef sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):\n if (display1):\n new_list = []\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n new_list.append(stack)\n sample_stack(new_list, 2, 2, 0, 1, False)\n else:\n fig,ax = plt.subplots(rows,cols,figsize=[12,12])\n for i in range((rows*cols)):\n ind = start_with + i*show_every\n ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)\n ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')\n ax[int(i/rows),int(i % rows)].axis('off')\n plt.show()\n\"\"\"\ndatapath = \"jpg_images/\"\nimg0 = Image.open(\"jpg_images/maskedimage\" + str(0) + \".jpg\")\ncounter = 0\nimg1 = []\nfor f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'):\n path = \"jpg_images/maskedimage\" + str(counter) + \".jpg\"\n img0 = Image.open(path).convert('L')\n img1.append(array(img0))\n counter += 1\nprint(\"Counter: \" + str(counter))\nimgs_to_process_orig = np.stack([s for s in img1])\n\"\"\"\nid = 2\n\nimgs = np.load(\"/Users/paulmccabe/Desktop/Segmentation Project/\" + \"justmask_%d.npy\" % (id))\ncounter = 0\nprint(\"Saving as jpg Images...\")\nfor img in imgs:\n scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img)\n counter += 1\ncounter = 0\n#print(\"Re-Importing jpg Images...\")\n#for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'):\n# path = \"jpg_images/maskedimage\" + str(counter) + \".jpg\"\n# img0 = Image.open(path).convert('L')\n# img1.append(array(img0))\n# counter += 1\nimgs[imgs == 1] = 255\nlist = []\nfor img in imgs:\n PIL_img = Image.fromarray(img.astype('uint8'))\n PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)\n np_img = array(PIL_edge)\n dilation = morphology.dilation(np_img, np.ones([4,4]))\n list.append(dilation)\n\nimgs_after_processing = np.stack([s for s in list])\n\nnp.save(\"/Users/paulmccabe/Desktop/Segmentation Project\" + \"/justedge_%d.npy\" % (id), 
imgs_after_processing[:284])\n\n#sample_stack(np_img)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
"""This module contains a class supporting composition of AugraphyPipelines""" class ComposePipelines: """The composition of multiple AugraphyPipelines. Define AugraphyPipelines elsewhere, then use this to compose them. ComposePipelines objects are callable on images (as numpy.ndarrays). :param pipelines: A list contains multiple augraphy.base.AugraphyPipeline. :type pipelines: list or tuple """ def __init__(self, pipelines): self.pipelines = pipelines def __call__(self, image): augmented_image = image.copy() newpipeline = dict() for i, pipeline in enumerate(self.pipelines): data_output = pipeline.augment(augmented_image) augmented_image = data_output["output"] for key in data_output.keys(): newkey = "pipeline" + str(i) + "-" + key newpipeline[newkey] = data_output[key] return newpipeline
normal
{ "blob_id": "13c55c313c740edce48fc979e8956fdd018e8aab", "index": 9716, "step-1": "<mask token>\n\n\nclass ComposePipelines:\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass ComposePipelines:\n <mask token>\n <mask token>\n\n def __call__(self, image):\n augmented_image = image.copy()\n newpipeline = dict()\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output['output']\n for key in data_output.keys():\n newkey = 'pipeline' + str(i) + '-' + key\n newpipeline[newkey] = data_output[key]\n return newpipeline\n", "step-3": "<mask token>\n\n\nclass ComposePipelines:\n <mask token>\n\n def __init__(self, pipelines):\n self.pipelines = pipelines\n\n def __call__(self, image):\n augmented_image = image.copy()\n newpipeline = dict()\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output['output']\n for key in data_output.keys():\n newkey = 'pipeline' + str(i) + '-' + key\n newpipeline[newkey] = data_output[key]\n return newpipeline\n", "step-4": "<mask token>\n\n\nclass ComposePipelines:\n \"\"\"The composition of multiple AugraphyPipelines.\n Define AugraphyPipelines elsewhere, then use this to compose them.\n ComposePipelines objects are callable on images (as numpy.ndarrays).\n\n :param pipelines: A list contains multiple augraphy.base.AugraphyPipeline.\n :type pipelines: list or tuple\n \"\"\"\n\n def __init__(self, pipelines):\n self.pipelines = pipelines\n\n def __call__(self, image):\n augmented_image = image.copy()\n newpipeline = dict()\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output['output']\n for key in data_output.keys():\n newkey = 'pipeline' + str(i) + '-' + key\n newpipeline[newkey] = data_output[key]\n return newpipeline\n", "step-5": "\"\"\"This module contains a class supporting composition of AugraphyPipelines\"\"\"\n\n\nclass ComposePipelines:\n \"\"\"The composition of multiple AugraphyPipelines.\n Define AugraphyPipelines elsewhere, then use this to compose them.\n ComposePipelines objects are callable on images (as numpy.ndarrays).\n\n :param pipelines: A list contains multiple augraphy.base.AugraphyPipeline.\n :type pipelines: list or tuple\n \"\"\"\n\n def __init__(self, pipelines):\n self.pipelines = pipelines\n\n def __call__(self, image):\n\n augmented_image = image.copy()\n newpipeline = dict()\n\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output[\"output\"]\n\n for key in data_output.keys():\n newkey = \"pipeline\" + str(i) + \"-\" + key\n newpipeline[newkey] = data_output[key]\n\n return newpipeline\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#! /usr/bin/python3 from scapy.all import * import sys ip=IP(src=sys.argv[1], dst=sys.argv[2]) syn_packet = TCP(sport=52255, dport=1237, flags="S", seq=100, options=[('MSS',689),('WScale',1)]) synack_packet = sr1(ip/syn_packet) my_ack = synack_packet.seq+1 ack_packet = TCP(sport=52255, dport=1237, flags="A", seq=101, ack=my_ack) send(ip/ack_packet)
normal
{ "blob_id": "acd6197e60cf59ffcaa33bb50a60a03592bb3559", "index": 7169, "step-1": "<mask token>\n", "step-2": "<mask token>\nsend(ip / ack_packet)\n", "step-3": "<mask token>\nip = IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[(\n 'MSS', 689), ('WScale', 1)])\nsynack_packet = sr1(ip / syn_packet)\nmy_ack = synack_packet.seq + 1\nack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack)\nsend(ip / ack_packet)\n", "step-4": "from scapy.all import *\nimport sys\nip = IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags='S', seq=100, options=[(\n 'MSS', 689), ('WScale', 1)])\nsynack_packet = sr1(ip / syn_packet)\nmy_ack = synack_packet.seq + 1\nack_packet = TCP(sport=52255, dport=1237, flags='A', seq=101, ack=my_ack)\nsend(ip / ack_packet)\n", "step-5": "#! /usr/bin/python3\n\nfrom scapy.all import *\nimport sys\n\nip=IP(src=sys.argv[1], dst=sys.argv[2])\nsyn_packet = TCP(sport=52255, dport=1237, flags=\"S\", seq=100, options=[('MSS',689),('WScale',1)])\nsynack_packet = sr1(ip/syn_packet)\nmy_ack = synack_packet.seq+1\nack_packet = TCP(sport=52255, dport=1237, flags=\"A\", seq=101, ack=my_ack)\nsend(ip/ack_packet)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ This is the ES library for Apache Kibble. It stores the elasticsearch handler and config options. """ import elasticsearch from kibble.configuration import KibbleConfigParser class KibbleESWrapper(object): """ Class for rewriting old-style queries to the new ones, where doc_type is an integral part of the DB name """ def __init__(self, ES): self.ES = ES def get(self, index, doc_type, id): return self.ES.get(index=index + "_" + doc_type, doc_type="_doc", id=id) def exists(self, index, doc_type, id): return self.ES.exists(index=index + "_" + doc_type, doc_type="_doc", id=id) def delete(self, index, doc_type, id): return self.ES.delete(index=index + "_" + doc_type, doc_type="_doc", id=id) def index(self, index, doc_type, id, body): return self.ES.index( index=index + "_" + doc_type, doc_type="_doc", id=id, body=body ) def update(self, index, doc_type, id, body): return self.ES.update( index=index + "_" + doc_type, doc_type="_doc", id=id, body=body ) def scroll(self, scroll_id, scroll): return self.ES.scroll(scroll_id=scroll_id, scroll=scroll) def delete_by_query(self, **kwargs): return self.ES.delete_by_query(**kwargs) def search( self, index, doc_type, size=100, scroll=None, _source_include=None, body=None ): return self.ES.search( index=index + "_" + doc_type, doc_type="_doc", size=size, scroll=scroll, _source_include=_source_include, body=body, ) def count(self, index, doc_type="*", body=None): return self.ES.count(index=index + "_" + doc_type, doc_type="_doc", body=body) class KibbleESWrapperSeven(object): """ Class for rewriting old-style queries to the >= 7.x ones, where doc_type is an integral part of the DB name and NO DOC_TYPE! 
""" def __init__(self, ES): self.ES = ES def get(self, index, doc_type, id): return self.ES.get(index=index + "_" + doc_type, id=id) def exists(self, index, doc_type, id): return self.ES.exists(index=index + "_" + doc_type, id=id) def delete(self, index, doc_type, id): return self.ES.delete(index=index + "_" + doc_type, id=id) def index(self, index, doc_type, id, body): return self.ES.index(index=index + "_" + doc_type, id=id, body=body) def update(self, index, doc_type, id, body): return self.ES.update(index=index + "_" + doc_type, id=id, body=body) def scroll(self, scroll_id, scroll): return self.ES.scroll(scroll_id=scroll_id, scroll=scroll) def delete_by_query(self, **kwargs): return self.ES.delete_by_query(**kwargs) def search( self, index, doc_type, size=100, scroll=None, _source_include=None, body=None ): return self.ES.search( index=index + "_" + doc_type, size=size, scroll=scroll, _source_includes=_source_include, body=body, ) def count(self, index, doc_type="*", body=None): return self.ES.count(index=index + "_" + doc_type, body=body) class KibbleDatabase(object): def __init__(self, config: KibbleConfigParser): self.config = config self.dbname = config.get("elasticsearch", "dbname") self.ES = elasticsearch.Elasticsearch( [config.get("elasticsearch", "conn_uri")], use_ssl=config.getboolean("elasticsearch", "ssl"), verify_certs=False, max_retries=5, retry_on_timeout=True, ) # IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x. # If so, we're using the new ES DB mappings, and need to adjust ALL # ES calls to match this. self.ESversion = int(self.ES.info()["version"]["number"].split(".")[0]) if self.ESversion >= 7: self.ES = KibbleESWrapperSeven(self.ES) elif self.ESversion >= 6: self.ES = KibbleESWrapper(self.ES)
normal
{ "blob_id": "f4b704a1416bfd6524340a68a20981957abf4340", "index": 9850, "step-1": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n", "step-2": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n <mask token>\n <mask token>\n <mask token>\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n <mask token>\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n <mask token>\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, 
doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n", "step-3": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id\n )\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n <mask token>\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',\n size=size, scroll=scroll, _source_include=_source_include, body\n =body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, 
doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n", "step-4": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id\n )\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',\n size=size, scroll=scroll, _source_include=_source_include, body\n =body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], 
use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n", "step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nThis is the ES library for Apache Kibble.\nIt stores the elasticsearch handler and config options.\n\"\"\"\n\nimport elasticsearch\n\nfrom kibble.configuration import KibbleConfigParser\n\n\nclass KibbleESWrapper(object):\n \"\"\"\n Class for rewriting old-style queries to the new ones,\n where doc_type is an integral part of the DB name\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(\n index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id, body=body\n )\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(\n index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id, body=body\n )\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(\n self, index, doc_type, size=100, scroll=None, _source_include=None, body=None\n ):\n return self.ES.search(\n index=index + \"_\" + doc_type,\n doc_type=\"_doc\",\n size=size,\n scroll=scroll,\n _source_include=_source_include,\n body=body,\n )\n\n def count(self, index, doc_type=\"*\", body=None):\n return self.ES.count(index=index + \"_\" + doc_type, doc_type=\"_doc\", body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + \"_\" + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + \"_\" + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + \"_\" + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + \"_\" + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + \"_\" + doc_type, 
id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(\n self, index, doc_type, size=100, scroll=None, _source_include=None, body=None\n ):\n return self.ES.search(\n index=index + \"_\" + doc_type,\n size=size,\n scroll=scroll,\n _source_includes=_source_include,\n body=body,\n )\n\n def count(self, index, doc_type=\"*\", body=None):\n return self.ES.count(index=index + \"_\" + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get(\"elasticsearch\", \"dbname\")\n self.ES = elasticsearch.Elasticsearch(\n [config.get(\"elasticsearch\", \"conn_uri\")],\n use_ssl=config.getboolean(\"elasticsearch\", \"ssl\"),\n verify_certs=False,\n max_retries=5,\n retry_on_timeout=True,\n )\n\n # IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x.\n # If so, we're using the new ES DB mappings, and need to adjust ALL\n # ES calls to match this.\n self.ESversion = int(self.ES.info()[\"version\"][\"number\"].split(\".\")[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n", "step-ids": [ 17, 19, 23, 25, 28 ] }
[ 17, 19, 23, 25, 28 ]
cijferICOR = float(input('Wat is je cijfer voor ICOR?: ')) x = 30 beloningICOR = cijferICOR * x beloning = 'beloning €' print(beloning, beloningICOR) cijferPROG = float(input('Wat is je cijfer voor PROG: ')) beloningPROG = cijferPROG * x print(beloning, beloningPROG) cijferCSN = float(input('Wat is je cijfer voor CSN?: ')) beloningCSN = cijferCSN * x print(beloning, beloningCSN) gemiddelde = beloningICOR + beloningPROG + beloningCSN print('de gemiddelde beloning is:€ ', gemiddelde / 3) totalevergoeding = beloningICOR + beloningPROG + beloningCSN print('uw totale vergoeding is:€ ', totalevergoeding) gemiddeld_cijfer = (cijferICOR + cijferPROG + cijferCSN) / 3 print('mijn cijfers gemiddeld is een', gemiddeld_cijfer, 'en dat levert een beloning op van: €', totalevergoeding)
normal
{ "blob_id": "74bca94cbcba0851e13d855c02fbc13fb0b09e6a", "index": 4263, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(beloning, beloningICOR)\n<mask token>\nprint(beloning, beloningPROG)\n<mask token>\nprint(beloning, beloningCSN)\n<mask token>\nprint('de gemiddelde beloning is:€ ', gemiddelde / 3)\n<mask token>\nprint('uw totale vergoeding is:€ ', totalevergoeding)\n<mask token>\nprint('mijn cijfers gemiddeld is een', gemiddeld_cijfer,\n 'en dat levert een beloning op van: €', totalevergoeding)\n", "step-3": "cijferICOR = float(input('Wat is je cijfer voor ICOR?: '))\nx = 30\nbeloningICOR = cijferICOR * x\nbeloning = 'beloning €'\nprint(beloning, beloningICOR)\ncijferPROG = float(input('Wat is je cijfer voor PROG: '))\nbeloningPROG = cijferPROG * x\nprint(beloning, beloningPROG)\ncijferCSN = float(input('Wat is je cijfer voor CSN?: '))\nbeloningCSN = cijferCSN * x\nprint(beloning, beloningCSN)\ngemiddelde = beloningICOR + beloningPROG + beloningCSN\nprint('de gemiddelde beloning is:€ ', gemiddelde / 3)\ntotalevergoeding = beloningICOR + beloningPROG + beloningCSN\nprint('uw totale vergoeding is:€ ', totalevergoeding)\ngemiddeld_cijfer = (cijferICOR + cijferPROG + cijferCSN) / 3\nprint('mijn cijfers gemiddeld is een', gemiddeld_cijfer,\n 'en dat levert een beloning op van: €', totalevergoeding)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
def has23(nums): this = nums[0] == 2 or nums[0] == 3 that = nums[1] == 2 or nums[1] == 3 return this or that
normal
{ "blob_id": "174c4c1ed7f2197e012644999cf23f5e82f4b7c3", "index": 3148, "step-1": "<mask token>\n", "step-2": "def has23(nums):\n this = nums[0] == 2 or nums[0] == 3\n that = nums[1] == 2 or nums[1] == 3\n return this or that\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
n,k = map(int,raw_input().split()) nums = list(map(int,raw_input().split())) if k==1: print min(nums) elif k==2: print max(nums[0],nums[-1]) else: print max(nums)
normal
{ "blob_id": "041a5bf205c1b3b3029623aa93835e99104464b2", "index": 2361, "step-1": "n,k = map(int,raw_input().split())\nnums = list(map(int,raw_input().split()))\nif k==1:\n print min(nums)\nelif k==2:\n print max(nums[0],nums[-1])\nelse:\n print max(nums)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# Представлен список чисел. # Необходимо вывести элементы исходного списка, # значения которых больше предыдущего элемента. from random import randint list = [] y = int(input("Введите количество элементов в списке>>> ")) for i in range(0, y): list.append(randint(1, 10)) new = [el for num, el in enumerate(list) if list[num - 1] < list[num]] print(f"Исходный список: {list}") print(f"Новый список список: {new}")
normal
{ "blob_id": "bfc4f5e90b7c22a29d33ae9b4a5edfb6086d79f4", "index": 2344, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(0, y):\n list.append(randint(1, 10))\n<mask token>\nprint(f'Исходный список: {list}')\nprint(f'Новый список список: {new}')\n", "step-3": "<mask token>\nlist = []\ny = int(input('Введите количество элементов в списке>>> '))\nfor i in range(0, y):\n list.append(randint(1, 10))\nnew = [el for num, el in enumerate(list) if list[num - 1] < list[num]]\nprint(f'Исходный список: {list}')\nprint(f'Новый список список: {new}')\n", "step-4": "from random import randint\nlist = []\ny = int(input('Введите количество элементов в списке>>> '))\nfor i in range(0, y):\n list.append(randint(1, 10))\nnew = [el for num, el in enumerate(list) if list[num - 1] < list[num]]\nprint(f'Исходный список: {list}')\nprint(f'Новый список список: {new}')\n", "step-5": "# Представлен список чисел.\n# Необходимо вывести элементы исходного списка,\n# значения которых больше предыдущего элемента.\nfrom random import randint\n\nlist = []\ny = int(input(\"Введите количество элементов в списке>>> \"))\nfor i in range(0, y):\n list.append(randint(1, 10))\n\nnew = [el for num, el in enumerate(list) if list[num - 1] < list[num]]\nprint(f\"Исходный список: {list}\")\nprint(f\"Новый список список: {new}\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
""" Process pair-end reads of barcode-guide-donor Step 1 cassette to generate a library reference table mapping barcodes to features. Create dictionaries mapping barcodes to forward and reverse reads, split into sub-segments. R1_dict: map barcodes to corresponding R1 sequences. R2_dict: map barcodes to corresponding R2 sequences. read_count_dict: map each barcode to corresponding total number of reads. """ from collections import Counter import argparse import gzip import numpy as np import pickle parser = argparse.ArgumentParser() parser.add_argument('-f', '-forward', required=True, help="forward sequencing files", nargs='+', action='store', dest='forward_files') parser.add_argument('-r', '-reverse', required=True, help="reverse sequencing files", nargs='+', action='store', dest='reverse_files') parser.add_argument('-s', '-segments', required=True, help="number of segments to split job into", action='store', dest='total_segments') parser.add_argument('-o', '-out', required=True, help="keyword for saving output files", action='store', dest='out') parser.add_argument('-c', '-cutoff', required=False, default=0, help="read count cutoff for barcodes to keep (default=0)", action='store', dest='cutoff') parser.add_argument('-b', '-barcode', required=False, default=31, help="length of barcode (default=31)", action='store', dest='barcode_length') parser.add_argument('-bq', '-bquality', required=False, default=53, help="ascii quality score cutoff for barcode (default=53)", action='store', dest='barcode_quality') parser.add_argument('-gdq', '-gdquality', required=False, default=55, help="ascii quality score cutoff for guide-donor (default=55)", action='store', dest='guide_donor_quality') args = parser.parse_args() OUTPUT_HEADER = args.out READ_COUNT_CUTOFF = int(args.cutoff) BARCODE_LENGTH = int(args.barcode_length) BARCODE_QUALITY_CUTOFF = int(args.barcode_quality) GUIDE_DONOR_QUALITY_CUTOFF = int(args.guide_donor_quality) # Collect all sequencing reads from forward files. forward_lines = [] for file in args.forward_files: forward_lines.extend(gzip.open(file).readlines()) # Forward sequence. forward_sequence = [forward_lines[r] for r in range(1, len(forward_lines), 4)] forward_sequence = [l.decode('utf-8').replace("\n","") for l in forward_sequence] # Forward sequence quality scores. forward_quality = [forward_lines[r] for r in range(3, len(forward_lines), 4)] forward_quality = [l.decode('utf-8').replace("\n","") for l in forward_quality] barcode_quality_scores = [] # Barcode quality. for line in forward_quality: scores = [ord(i) for i in line[:BARCODE_LENGTH]] barcode_quality_scores.append(np.mean(scores)) forward_guide_donor_quality_scores = [] # Guide-donor quality. for line in forward_quality: scores = [ord(i) for i in line[BARCODE_LENGTH:]] forward_guide_donor_quality_scores.append(np.mean(scores)) # Collect all sequencing reads from reverse files. reverse_lines = [] for file in args.reverse_files: reverse_lines.extend(gzip.open(file).readlines()) # Reverse sequence. reverse_sequence = [reverse_lines[r] for r in range(1, len(reverse_lines), 4)] reverse_sequence = [l.decode('utf-8').replace("\n","") for l in reverse_sequence] # Reverse sequence base quality scores. 
reverse_quality = [reverse_lines[r] for r in range(3, len(reverse_lines), 4)] reverse_quality = [l.decode('utf-8').replace("\n","") for l in reverse_quality] reverse_guide_donor_quality_scores = [] for line in reverse_quality: scores = [ord(i) for i in line] reverse_guide_donor_quality_scores.append(np.mean(scores)) # Filter out low quality barcodes and low quality guide-donor sequences. forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, f[:BARCODE_LENGTH]) for f, r, fscore, fscore2, rscore in zip(forward_sequence, reverse_sequence, barcode_quality_scores, forward_guide_donor_quality_scores, reverse_guide_donor_quality_scores) if (fscore >= BARCODE_QUALITY_CUTOFF) and (fscore2 >= GUIDE_DONOR_QUALITY_CUTOFF) and (rscore >= GUIDE_DONOR_QUALITY_CUTOFF)]) if (READ_COUNT_CUTOFF != 0): # optional choice to remove low read barcodes from annotations. barcodes_to_keep = [key for key, count in Counter(barcodes).items() if count >= READ_COUNT_CUTOFF] keep_dict = {g: True for g in barcodes_to_keep} forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r, b in zip(forward_sequence, reverse_sequence, barcodes) if b in keep_dict]) # Store barcode read count dictionary for later use. count_dict = dict(Counter(barcodes)) pickle_out = open(OUTPUT_HEADER + ".read_count_dict", "wb") pickle.dump(count_dict, pickle_out, protocol=2) pickle_out.close() # Divide up barcodes into specified number of segments for parallel analysis. LENGTH = len(set(barcodes)) total_segments = int(args.total_segments) barcode_list = list(set(barcodes)) for segment in range(0, total_segments): start = int((LENGTH/total_segments)*segment) # determine start and end position of segment. if (segment+1 == total_segments): sub_barcodes_set = barcode_list[start:] else: stop = int((LENGTH/total_segments)*(segment+1)) sub_barcodes_set = barcode_list[start:stop] sub_barcodes_dict = {b: True for b in sub_barcodes_set} sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b in zip(forward_sequence, reverse_sequence, barcodes) if b in sub_barcodes_dict]) R1_dict, R2_dict = {}, {} # store reads by barcode into R1 and R2 dictionaries. for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes): if (b not in R1_dict) and (b not in R2_dict): R1_dict[b] = [f] R2_dict[b] = [r] else: R1_dict[b].append(f) R2_dict[b].append(r) pickle_out = open(OUTPUT_HEADER + "_" + str(segment) + "-" + str(total_segments) + ".R1_dict", "wb") pickle.dump(R1_dict, pickle_out, protocol=2) pickle_out.close() pickle_out = open(OUTPUT_HEADER + "_" + str(segment) + "-" + str(total_segments) + ".R2_dict", "wb") pickle.dump(R2_dict, pickle_out, protocol=2) pickle_out.close()
normal
{ "blob_id": "9206e4c4eff8ca64266ce53705e88069912b80d8", "index": 1526, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('-f', '-forward', required=True, help=\n 'forward sequencing files', nargs='+', action='store', dest='forward_files'\n )\nparser.add_argument('-r', '-reverse', required=True, help=\n 'reverse sequencing files', nargs='+', action='store', dest='reverse_files'\n )\nparser.add_argument('-s', '-segments', required=True, help=\n 'number of segments to split job into', action='store', dest=\n 'total_segments')\nparser.add_argument('-o', '-out', required=True, help=\n 'keyword for saving output files', action='store', dest='out')\nparser.add_argument('-c', '-cutoff', required=False, default=0, help=\n 'read count cutoff for barcodes to keep (default=0)', action='store',\n dest='cutoff')\nparser.add_argument('-b', '-barcode', required=False, default=31, help=\n 'length of barcode (default=31)', action='store', dest='barcode_length')\nparser.add_argument('-bq', '-bquality', required=False, default=53, help=\n 'ascii quality score cutoff for barcode (default=53)', action='store',\n dest='barcode_quality')\nparser.add_argument('-gdq', '-gdquality', required=False, default=55, help=\n 'ascii quality score cutoff for guide-donor (default=55)', action=\n 'store', dest='guide_donor_quality')\n<mask token>\nfor file in args.forward_files:\n forward_lines.extend(gzip.open(file).readlines())\n<mask token>\nfor line in forward_quality:\n scores = [ord(i) for i in line[:BARCODE_LENGTH]]\n barcode_quality_scores.append(np.mean(scores))\n<mask token>\nfor line in forward_quality:\n scores = [ord(i) for i in line[BARCODE_LENGTH:]]\n forward_guide_donor_quality_scores.append(np.mean(scores))\n<mask token>\nfor file in args.reverse_files:\n reverse_lines.extend(gzip.open(file).readlines())\n<mask token>\nfor line in reverse_quality:\n scores = [ord(i) for i in line]\n reverse_guide_donor_quality_scores.append(np.mean(scores))\n<mask token>\nif READ_COUNT_CUTOFF != 0:\n barcodes_to_keep = [key for key, count in Counter(barcodes).items() if \n count >= READ_COUNT_CUTOFF]\n keep_dict = {g: (True) for g in barcodes_to_keep}\n forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r,\n b in zip(forward_sequence, reverse_sequence, barcodes) if b in\n keep_dict])\n<mask token>\npickle.dump(count_dict, pickle_out, protocol=2)\npickle_out.close()\n<mask token>\nfor segment in range(0, total_segments):\n start = int(LENGTH / total_segments * segment)\n if segment + 1 == total_segments:\n sub_barcodes_set = barcode_list[start:]\n else:\n stop = int(LENGTH / total_segments * (segment + 1))\n sub_barcodes_set = barcode_list[start:stop]\n sub_barcodes_dict = {b: (True) for b in sub_barcodes_set}\n sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b in\n zip(forward_sequence, reverse_sequence, barcodes) if b in\n sub_barcodes_dict])\n R1_dict, R2_dict = {}, {}\n for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):\n if b not in R1_dict and b not in R2_dict:\n R1_dict[b] = [f]\n R2_dict[b] = [r]\n else:\n R1_dict[b].append(f)\n R2_dict[b].append(r)\n pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(\n total_segments) + '.R1_dict', 'wb')\n pickle.dump(R1_dict, pickle_out, protocol=2)\n pickle_out.close()\n pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(\n total_segments) + '.R2_dict', 'wb')\n pickle.dump(R2_dict, pickle_out, protocol=2)\n pickle_out.close()\n", "step-3": "<mask token>\nparser = 
argparse.ArgumentParser()\nparser.add_argument('-f', '-forward', required=True, help=\n 'forward sequencing files', nargs='+', action='store', dest='forward_files'\n )\nparser.add_argument('-r', '-reverse', required=True, help=\n 'reverse sequencing files', nargs='+', action='store', dest='reverse_files'\n )\nparser.add_argument('-s', '-segments', required=True, help=\n 'number of segments to split job into', action='store', dest=\n 'total_segments')\nparser.add_argument('-o', '-out', required=True, help=\n 'keyword for saving output files', action='store', dest='out')\nparser.add_argument('-c', '-cutoff', required=False, default=0, help=\n 'read count cutoff for barcodes to keep (default=0)', action='store',\n dest='cutoff')\nparser.add_argument('-b', '-barcode', required=False, default=31, help=\n 'length of barcode (default=31)', action='store', dest='barcode_length')\nparser.add_argument('-bq', '-bquality', required=False, default=53, help=\n 'ascii quality score cutoff for barcode (default=53)', action='store',\n dest='barcode_quality')\nparser.add_argument('-gdq', '-gdquality', required=False, default=55, help=\n 'ascii quality score cutoff for guide-donor (default=55)', action=\n 'store', dest='guide_donor_quality')\nargs = parser.parse_args()\nOUTPUT_HEADER = args.out\nREAD_COUNT_CUTOFF = int(args.cutoff)\nBARCODE_LENGTH = int(args.barcode_length)\nBARCODE_QUALITY_CUTOFF = int(args.barcode_quality)\nGUIDE_DONOR_QUALITY_CUTOFF = int(args.guide_donor_quality)\nforward_lines = []\nfor file in args.forward_files:\n forward_lines.extend(gzip.open(file).readlines())\nforward_sequence = [forward_lines[r] for r in range(1, len(forward_lines), 4)]\nforward_sequence = [l.decode('utf-8').replace('\\n', '') for l in\n forward_sequence]\nforward_quality = [forward_lines[r] for r in range(3, len(forward_lines), 4)]\nforward_quality = [l.decode('utf-8').replace('\\n', '') for l in forward_quality\n ]\nbarcode_quality_scores = []\nfor line in forward_quality:\n scores = [ord(i) for i in line[:BARCODE_LENGTH]]\n barcode_quality_scores.append(np.mean(scores))\nforward_guide_donor_quality_scores = []\nfor line in forward_quality:\n scores = [ord(i) for i in line[BARCODE_LENGTH:]]\n forward_guide_donor_quality_scores.append(np.mean(scores))\nreverse_lines = []\nfor file in args.reverse_files:\n reverse_lines.extend(gzip.open(file).readlines())\nreverse_sequence = [reverse_lines[r] for r in range(1, len(reverse_lines), 4)]\nreverse_sequence = [l.decode('utf-8').replace('\\n', '') for l in\n reverse_sequence]\nreverse_quality = [reverse_lines[r] for r in range(3, len(reverse_lines), 4)]\nreverse_quality = [l.decode('utf-8').replace('\\n', '') for l in reverse_quality\n ]\nreverse_guide_donor_quality_scores = []\nfor line in reverse_quality:\n scores = [ord(i) for i in line]\n reverse_guide_donor_quality_scores.append(np.mean(scores))\nforward_sequence, reverse_sequence, barcodes = zip(*[(f, r, f[:\n BARCODE_LENGTH]) for f, r, fscore, fscore2, rscore in zip(\n forward_sequence, reverse_sequence, barcode_quality_scores,\n forward_guide_donor_quality_scores, reverse_guide_donor_quality_scores) if\n fscore >= BARCODE_QUALITY_CUTOFF and fscore2 >=\n GUIDE_DONOR_QUALITY_CUTOFF and rscore >= GUIDE_DONOR_QUALITY_CUTOFF])\nif READ_COUNT_CUTOFF != 0:\n barcodes_to_keep = [key for key, count in Counter(barcodes).items() if \n count >= READ_COUNT_CUTOFF]\n keep_dict = {g: (True) for g in barcodes_to_keep}\n forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r,\n b in zip(forward_sequence, 
reverse_sequence, barcodes) if b in\n keep_dict])\ncount_dict = dict(Counter(barcodes))\npickle_out = open(OUTPUT_HEADER + '.read_count_dict', 'wb')\npickle.dump(count_dict, pickle_out, protocol=2)\npickle_out.close()\nLENGTH = len(set(barcodes))\ntotal_segments = int(args.total_segments)\nbarcode_list = list(set(barcodes))\nfor segment in range(0, total_segments):\n start = int(LENGTH / total_segments * segment)\n if segment + 1 == total_segments:\n sub_barcodes_set = barcode_list[start:]\n else:\n stop = int(LENGTH / total_segments * (segment + 1))\n sub_barcodes_set = barcode_list[start:stop]\n sub_barcodes_dict = {b: (True) for b in sub_barcodes_set}\n sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b in\n zip(forward_sequence, reverse_sequence, barcodes) if b in\n sub_barcodes_dict])\n R1_dict, R2_dict = {}, {}\n for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):\n if b not in R1_dict and b not in R2_dict:\n R1_dict[b] = [f]\n R2_dict[b] = [r]\n else:\n R1_dict[b].append(f)\n R2_dict[b].append(r)\n pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(\n total_segments) + '.R1_dict', 'wb')\n pickle.dump(R1_dict, pickle_out, protocol=2)\n pickle_out.close()\n pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(\n total_segments) + '.R2_dict', 'wb')\n pickle.dump(R2_dict, pickle_out, protocol=2)\n pickle_out.close()\n", "step-4": "<mask token>\nfrom collections import Counter\nimport argparse\nimport gzip\nimport numpy as np\nimport pickle\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '-forward', required=True, help=\n 'forward sequencing files', nargs='+', action='store', dest='forward_files'\n )\nparser.add_argument('-r', '-reverse', required=True, help=\n 'reverse sequencing files', nargs='+', action='store', dest='reverse_files'\n )\nparser.add_argument('-s', '-segments', required=True, help=\n 'number of segments to split job into', action='store', dest=\n 'total_segments')\nparser.add_argument('-o', '-out', required=True, help=\n 'keyword for saving output files', action='store', dest='out')\nparser.add_argument('-c', '-cutoff', required=False, default=0, help=\n 'read count cutoff for barcodes to keep (default=0)', action='store',\n dest='cutoff')\nparser.add_argument('-b', '-barcode', required=False, default=31, help=\n 'length of barcode (default=31)', action='store', dest='barcode_length')\nparser.add_argument('-bq', '-bquality', required=False, default=53, help=\n 'ascii quality score cutoff for barcode (default=53)', action='store',\n dest='barcode_quality')\nparser.add_argument('-gdq', '-gdquality', required=False, default=55, help=\n 'ascii quality score cutoff for guide-donor (default=55)', action=\n 'store', dest='guide_donor_quality')\nargs = parser.parse_args()\nOUTPUT_HEADER = args.out\nREAD_COUNT_CUTOFF = int(args.cutoff)\nBARCODE_LENGTH = int(args.barcode_length)\nBARCODE_QUALITY_CUTOFF = int(args.barcode_quality)\nGUIDE_DONOR_QUALITY_CUTOFF = int(args.guide_donor_quality)\nforward_lines = []\nfor file in args.forward_files:\n forward_lines.extend(gzip.open(file).readlines())\nforward_sequence = [forward_lines[r] for r in range(1, len(forward_lines), 4)]\nforward_sequence = [l.decode('utf-8').replace('\\n', '') for l in\n forward_sequence]\nforward_quality = [forward_lines[r] for r in range(3, len(forward_lines), 4)]\nforward_quality = [l.decode('utf-8').replace('\\n', '') for l in forward_quality\n ]\nbarcode_quality_scores = []\nfor line in forward_quality:\n scores = [ord(i) for i in 
line[:BARCODE_LENGTH]]\n barcode_quality_scores.append(np.mean(scores))\nforward_guide_donor_quality_scores = []\nfor line in forward_quality:\n scores = [ord(i) for i in line[BARCODE_LENGTH:]]\n forward_guide_donor_quality_scores.append(np.mean(scores))\nreverse_lines = []\nfor file in args.reverse_files:\n reverse_lines.extend(gzip.open(file).readlines())\nreverse_sequence = [reverse_lines[r] for r in range(1, len(reverse_lines), 4)]\nreverse_sequence = [l.decode('utf-8').replace('\\n', '') for l in\n reverse_sequence]\nreverse_quality = [reverse_lines[r] for r in range(3, len(reverse_lines), 4)]\nreverse_quality = [l.decode('utf-8').replace('\\n', '') for l in reverse_quality\n ]\nreverse_guide_donor_quality_scores = []\nfor line in reverse_quality:\n scores = [ord(i) for i in line]\n reverse_guide_donor_quality_scores.append(np.mean(scores))\nforward_sequence, reverse_sequence, barcodes = zip(*[(f, r, f[:\n BARCODE_LENGTH]) for f, r, fscore, fscore2, rscore in zip(\n forward_sequence, reverse_sequence, barcode_quality_scores,\n forward_guide_donor_quality_scores, reverse_guide_donor_quality_scores) if\n fscore >= BARCODE_QUALITY_CUTOFF and fscore2 >=\n GUIDE_DONOR_QUALITY_CUTOFF and rscore >= GUIDE_DONOR_QUALITY_CUTOFF])\nif READ_COUNT_CUTOFF != 0:\n barcodes_to_keep = [key for key, count in Counter(barcodes).items() if \n count >= READ_COUNT_CUTOFF]\n keep_dict = {g: (True) for g in barcodes_to_keep}\n forward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r,\n b in zip(forward_sequence, reverse_sequence, barcodes) if b in\n keep_dict])\ncount_dict = dict(Counter(barcodes))\npickle_out = open(OUTPUT_HEADER + '.read_count_dict', 'wb')\npickle.dump(count_dict, pickle_out, protocol=2)\npickle_out.close()\nLENGTH = len(set(barcodes))\ntotal_segments = int(args.total_segments)\nbarcode_list = list(set(barcodes))\nfor segment in range(0, total_segments):\n start = int(LENGTH / total_segments * segment)\n if segment + 1 == total_segments:\n sub_barcodes_set = barcode_list[start:]\n else:\n stop = int(LENGTH / total_segments * (segment + 1))\n sub_barcodes_set = barcode_list[start:stop]\n sub_barcodes_dict = {b: (True) for b in sub_barcodes_set}\n sub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b in\n zip(forward_sequence, reverse_sequence, barcodes) if b in\n sub_barcodes_dict])\n R1_dict, R2_dict = {}, {}\n for f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):\n if b not in R1_dict and b not in R2_dict:\n R1_dict[b] = [f]\n R2_dict[b] = [r]\n else:\n R1_dict[b].append(f)\n R2_dict[b].append(r)\n pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(\n total_segments) + '.R1_dict', 'wb')\n pickle.dump(R1_dict, pickle_out, protocol=2)\n pickle_out.close()\n pickle_out = open(OUTPUT_HEADER + '_' + str(segment) + '-' + str(\n total_segments) + '.R2_dict', 'wb')\n pickle.dump(R2_dict, pickle_out, protocol=2)\n pickle_out.close()\n", "step-5": "\"\"\"\nProcess pair-end reads of barcode-guide-donor Step 1 cassette to generate a library reference table mapping barcodes to features.\nCreate dictionaries mapping barcodes to forward and reverse reads, split into sub-segments.\n\nR1_dict: map barcodes to corresponding R1 sequences.\nR2_dict: map barcodes to corresponding R2 sequences.\nread_count_dict: map each barcode to corresponding total number of reads.\n\n\"\"\"\n\nfrom collections import Counter\nimport argparse\nimport gzip\nimport numpy as np\nimport pickle\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '-forward', 
required=True, help=\"forward sequencing files\", nargs='+', action='store', dest='forward_files')\nparser.add_argument('-r', '-reverse', required=True, help=\"reverse sequencing files\", nargs='+', action='store', dest='reverse_files')\nparser.add_argument('-s', '-segments', required=True, help=\"number of segments to split job into\", action='store', dest='total_segments')\nparser.add_argument('-o', '-out', required=True, help=\"keyword for saving output files\", action='store', dest='out')\nparser.add_argument('-c', '-cutoff', required=False, default=0, help=\"read count cutoff for barcodes to keep (default=0)\", action='store', dest='cutoff')\nparser.add_argument('-b', '-barcode', required=False, default=31, help=\"length of barcode (default=31)\", action='store', dest='barcode_length')\nparser.add_argument('-bq', '-bquality', required=False, default=53, help=\"ascii quality score cutoff for barcode (default=53)\", action='store', dest='barcode_quality')\nparser.add_argument('-gdq', '-gdquality', required=False, default=55, help=\"ascii quality score cutoff for guide-donor (default=55)\", action='store', dest='guide_donor_quality')\n\nargs = parser.parse_args()\n\nOUTPUT_HEADER = args.out\nREAD_COUNT_CUTOFF = int(args.cutoff)\nBARCODE_LENGTH = int(args.barcode_length)\nBARCODE_QUALITY_CUTOFF = int(args.barcode_quality)\nGUIDE_DONOR_QUALITY_CUTOFF = int(args.guide_donor_quality)\n\n# Collect all sequencing reads from forward files.\nforward_lines = []\nfor file in args.forward_files:\n\tforward_lines.extend(gzip.open(file).readlines())\n\n# Forward sequence.\nforward_sequence = [forward_lines[r] for r in range(1, len(forward_lines), 4)]\nforward_sequence = [l.decode('utf-8').replace(\"\\n\",\"\") for l in forward_sequence]\n\n# Forward sequence quality scores.\nforward_quality = [forward_lines[r] for r in range(3, len(forward_lines), 4)]\nforward_quality = [l.decode('utf-8').replace(\"\\n\",\"\") for l in forward_quality]\n\nbarcode_quality_scores = [] # Barcode quality.\nfor line in forward_quality:\n scores = [ord(i) for i in line[:BARCODE_LENGTH]]\n barcode_quality_scores.append(np.mean(scores))\n\nforward_guide_donor_quality_scores = [] # Guide-donor quality.\nfor line in forward_quality:\n scores = [ord(i) for i in line[BARCODE_LENGTH:]]\n forward_guide_donor_quality_scores.append(np.mean(scores))\n\n# Collect all sequencing reads from reverse files.\nreverse_lines = []\nfor file in args.reverse_files:\n\treverse_lines.extend(gzip.open(file).readlines())\n\n# Reverse sequence.\nreverse_sequence = [reverse_lines[r] for r in range(1, len(reverse_lines), 4)]\nreverse_sequence = [l.decode('utf-8').replace(\"\\n\",\"\") for l in reverse_sequence]\n\n# Reverse sequence base quality scores.\nreverse_quality = [reverse_lines[r] for r in range(3, len(reverse_lines), 4)]\nreverse_quality = [l.decode('utf-8').replace(\"\\n\",\"\") for l in reverse_quality]\n\nreverse_guide_donor_quality_scores = []\nfor line in reverse_quality:\n scores = [ord(i) for i in line]\n reverse_guide_donor_quality_scores.append(np.mean(scores))\n\n# Filter out low quality barcodes and low quality guide-donor sequences.\nforward_sequence, reverse_sequence, barcodes = zip(*[(f, r, f[:BARCODE_LENGTH]) for f, r, fscore, fscore2, rscore\n in zip(forward_sequence, reverse_sequence, barcode_quality_scores, forward_guide_donor_quality_scores, reverse_guide_donor_quality_scores) \n if (fscore >= BARCODE_QUALITY_CUTOFF) and (fscore2 >= GUIDE_DONOR_QUALITY_CUTOFF) and (rscore >= GUIDE_DONOR_QUALITY_CUTOFF)])\n\nif 
(READ_COUNT_CUTOFF != 0): # optional choice to remove low read barcodes from annotations.\n\tbarcodes_to_keep = [key for key, count in Counter(barcodes).items() if count >= READ_COUNT_CUTOFF]\n\tkeep_dict = {g: True for g in barcodes_to_keep}\n\tforward_sequence, reverse_sequence, barcodes = zip(*[(f, r, b) for f, r, b \n\t\tin zip(forward_sequence, reverse_sequence, barcodes) if b in keep_dict])\n\n# Store barcode read count dictionary for later use. \ncount_dict = dict(Counter(barcodes))\npickle_out = open(OUTPUT_HEADER + \".read_count_dict\", \"wb\")\npickle.dump(count_dict, pickle_out, protocol=2)\npickle_out.close()\n\n# Divide up barcodes into specified number of segments for parallel analysis.\nLENGTH = len(set(barcodes))\ntotal_segments = int(args.total_segments)\n\nbarcode_list = list(set(barcodes))\nfor segment in range(0, total_segments):\n\tstart = int((LENGTH/total_segments)*segment) # determine start and end position of segment.\n\tif (segment+1 == total_segments):\n\t\tsub_barcodes_set = barcode_list[start:]\n\telse:\n\t\tstop = int((LENGTH/total_segments)*(segment+1))\n\t\tsub_barcodes_set = barcode_list[start:stop]\n\tsub_barcodes_dict = {b: True for b in sub_barcodes_set}\n\n\tsub_forward, sub_reverse, sub_barcodes = zip(*[(f, r, b) for f, r, b \n\t\tin zip(forward_sequence, reverse_sequence, barcodes) if b in sub_barcodes_dict])\n\n\tR1_dict, R2_dict = {}, {} # store reads by barcode into R1 and R2 dictionaries.\n\tfor f, r, b in zip(sub_forward, sub_reverse, sub_barcodes):\n\t\tif (b not in R1_dict) and (b not in R2_dict):\n\t\t\tR1_dict[b] = [f]\n\t\t\tR2_dict[b] = [r]\n\t\telse:\n\t\t\tR1_dict[b].append(f)\n\t\t\tR2_dict[b].append(r)\n\n\tpickle_out = open(OUTPUT_HEADER + \"_\" + str(segment) + \"-\" + str(total_segments) + \".R1_dict\", \"wb\")\n\tpickle.dump(R1_dict, pickle_out, protocol=2)\n\tpickle_out.close()\n\n\tpickle_out = open(OUTPUT_HEADER + \"_\" + str(segment) + \"-\" + str(total_segments) + \".R2_dict\", \"wb\")\n\tpickle.dump(R2_dict, pickle_out, protocol=2)\n\tpickle_out.close()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import datetime import logging import random import transform import timelapse # merge two iterators producing sorted values def merge(s1, s2): try: x1 = next(s1) except StopIteration: yield from s2 return try: x2 = next(s2) except StopIteration: yield from s1 return while True: if x2 > x1: yield x1 try: x1 = next(s1) except StopIteration: yield x2 yield from s2 return else: yield x2 try: x2 = next(s2) except StopIteration: yield x1 yield from s1 return def sliding_stream(delay_secs=20): ts = datetime.datetime.now() delay = datetime.timedelta(0,delay_secs) while True: yield(ts, random.choice(transform.all_transforms)) ts = ts + delay class Sliders(timelapse.TimeLapse): def __init__(self, server_list, nick="Sliders", channel="#sliders", realname="Sliders", sliding_window = 60, **params): super().__init__(server_list, nick=nick, channel=channel, **params) self.lapsed = merge(self.lapsed, sliding_stream(sliding_window)) self.sliders_transform = random.choice(transform.all_transforms) def on_lapsed_message(self, msg): if isinstance(msg, transform.Transform): self.sliders_transform = msg self.connection.privmsg(self.lapsed_channel, "\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux " + msg.name + "\x01") else: super().on_lapsed_message(self.sliders_transform(msg))
normal
{ "blob_id": "c651d49c98a4cf457c8252c94c6785dea8e9af60", "index": 3909, "step-1": "<mask token>\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n", "step-2": "<mask token>\n\n\ndef sliding_stream(delay_secs=20):\n ts = datetime.datetime.now()\n delay = datetime.timedelta(0, delay_secs)\n while True:\n yield ts, random.choice(transform.all_transforms)\n ts = ts + delay\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n", "step-3": "<mask token>\n\n\ndef merge(s1, s2):\n try:\n x1 = next(s1)\n except StopIteration:\n yield from s2\n return\n try:\n x2 = next(s2)\n except StopIteration:\n yield from s1\n return\n while True:\n if x2 > x1:\n yield x1\n try:\n x1 = next(s1)\n except StopIteration:\n yield x2\n yield from s2\n return\n else:\n yield x2\n try:\n x2 = next(s2)\n except StopIteration:\n yield x1\n yield from s1\n return\n\n\ndef sliding_stream(delay_secs=20):\n ts = datetime.datetime.now()\n delay = datetime.timedelta(0, delay_secs)\n while True:\n yield ts, random.choice(transform.all_transforms)\n ts = ts + delay\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n", "step-4": "import datetime\nimport logging\nimport random\nimport transform\nimport timelapse\n\n\ndef merge(s1, s2):\n try:\n x1 = next(s1)\n except StopIteration:\n yield from s2\n return\n try:\n x2 = next(s2)\n except StopIteration:\n yield from s1\n return\n while True:\n if x2 > x1:\n yield x1\n try:\n x1 = next(s1)\n except StopIteration:\n yield x2\n yield from s2\n return\n else:\n yield x2\n try:\n x2 = next(s2)\n except StopIteration:\n yield x1\n yield from s1\n return\n\n\ndef sliding_stream(delay_secs=20):\n ts = 
datetime.datetime.now()\n delay = datetime.timedelta(0, delay_secs)\n while True:\n yield ts, random.choice(transform.all_transforms)\n ts = ts + delay\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n", "step-5": "import datetime\nimport logging\nimport random\nimport transform\nimport timelapse\n\n# merge two iterators producing sorted values\ndef merge(s1, s2):\n try:\n x1 = next(s1)\n except StopIteration:\n yield from s2\n return\n\n try:\n x2 = next(s2)\n except StopIteration:\n yield from s1\n return\n\n while True:\n if x2 > x1:\n yield x1\n try:\n x1 = next(s1)\n except StopIteration:\n yield x2\n yield from s2\n return\n else:\n yield x2\n try:\n x2 = next(s2)\n except StopIteration:\n yield x1\n yield from s1\n return\n \n\ndef sliding_stream(delay_secs=20):\n ts = datetime.datetime.now()\n delay = datetime.timedelta(0,delay_secs)\n while True:\n yield(ts, random.choice(transform.all_transforms))\n ts = ts + delay\n\nclass Sliders(timelapse.TimeLapse):\n def __init__(self, server_list, nick=\"Sliders\", channel=\"#sliders\", realname=\"Sliders\",\n sliding_window = 60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel,\n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + \"\\x01\")\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
class ListNode: def __init__(self,listt,node,g,h): self.node_list = [] for element in listt: self.node_list.append(element) self.node_list.append(node) self.g=g self.f = int(g)+int(h); self.ID = node def is_Goal(self,complete_nodes): if complete_nodes in self.node_list: return True return False
normal
{ "blob_id": "2b796fb99e4607d310a533e8d9897100c4df087d", "index": 2665, "step-1": "<mask token>\n", "step-2": "class ListNode:\n <mask token>\n <mask token>\n", "step-3": "class ListNode:\n\n def __init__(self, listt, node, g, h):\n self.node_list = []\n for element in listt:\n self.node_list.append(element)\n self.node_list.append(node)\n self.g = g\n self.f = int(g) + int(h)\n self.ID = node\n <mask token>\n", "step-4": "class ListNode:\n\n def __init__(self, listt, node, g, h):\n self.node_list = []\n for element in listt:\n self.node_list.append(element)\n self.node_list.append(node)\n self.g = g\n self.f = int(g) + int(h)\n self.ID = node\n\n def is_Goal(self, complete_nodes):\n if complete_nodes in self.node_list:\n return True\n return False\n", "step-5": "class ListNode:\r\n def __init__(self,listt,node,g,h):\r\n self.node_list = []\r\n for element in listt:\r\n self.node_list.append(element)\r\n self.node_list.append(node)\r\n\r\n self.g=g\r\n self.f = int(g)+int(h);\r\n self.ID = node\r\n\r\n\r\n\r\n def is_Goal(self,complete_nodes):\r\n\r\n if complete_nodes in self.node_list:\r\n return True\r\n return False\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django.db.models import Q from django.contrib import messages from django.views.generic import ListView, DetailView from django.shortcuts import get_object_or_404, redirect, render from django.contrib.auth.decorators import login_required from django.http import HttpResponse from django.views.decorators.http import require_POST from .models import Pills, Like, Comment from .forms import CommentForm import json class PillListView(ListView): model = Pills template_name = "pills/pill_list.html" form_class = CommentForm def get_context_data(self, **kwargs): context = super(PillListView, self).get_context_data(**kwargs) return context def get_queryset(self, *args, **kwargs): qs = Pills.objects.prefetch_related('category_body','category_gender','like_user_set').all() print(self.request.GET) query = self.request.GET.get("q", None) if query is not None: qs = qs.filter( Q(name__icontains=query) | Q(category_body__name__icontains=query) ) return qs # def PillCategory_SearchList(request): # qs = Pills.objects.prefetch_related('category_body').all() # query = self.request.GET.get("q", None) # if query is not None: # qs = qs.filter( # Q(name__icontains=query) # ) # return qs # context = { # 'qs' : qs, # } # return render(request, "categorysearch.html", context) @login_required def comment_new(request): pk = request.POST.get('pk') pill = get_object_or_404(Pills, pk=pk) form = CommentForm if request.method == 'POST': form = CommentForm(request.POST) if form.is_valid(): comment = form.save(commit=False) comment.author = request.user comment.pills = pill comment.save() return render(request, 'pills/comment_new_ajax.html', {'comment':comment, 'form':form,}) return redirect("pills:pill_list") @login_required def comment_delete(request, pill_pk, pk): comment = get_object_or_404(Comment, pk=pk) if request.method == 'POST' and request.user == comment.author: comment.delete() messages.success(request, '삭제했습니다.') return redirect('pills:pill_list') messages.warning('권한이 없습니다.') return redirect('pills:pill_list') class PillDetailView(DetailView): model = Pills template_name = 'pills/pill_detail.html' # context_object_name = 'pills' @login_required @require_POST # POST method만 받음 def pill_like(request): pk = request.POST.get('pk', None) pill = get_object_or_404(Pills, pk=pk) pill_like, pill_like_created = pill.like_set.get_or_create(user=request.user) if not pill_like_created: pill_like.delete() message = "좋아요 취소" else: message = "좋아요" context = { 'like_count': pill.like_count, 'message': message, 'username': request.user.username } return HttpResponse(json.dumps(context))
normal
{ "blob_id": "3c193decc4a1f284de953003fbba434d6e798b24", "index": 2827, "step-1": "<mask token>\n\n\nclass PillListView(ListView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass PillListView(ListView):\n model = Pills\n template_name = 'pills/pill_list.html'\n form_class = CommentForm\n\n def get_context_data(self, **kwargs):\n context = super(PillListView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self, *args, **kwargs):\n qs = Pills.objects.prefetch_related('category_body',\n 'category_gender', 'like_user_set').all()\n print(self.request.GET)\n query = self.request.GET.get('q', None)\n if query is not None:\n qs = qs.filter(Q(name__icontains=query) | Q(\n category_body__name__icontains=query))\n return qs\n\n\n<mask token>\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass PillListView(ListView):\n model = Pills\n template_name = 'pills/pill_list.html'\n form_class = CommentForm\n\n def get_context_data(self, **kwargs):\n context = super(PillListView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self, *args, **kwargs):\n qs = Pills.objects.prefetch_related('category_body',\n 'category_gender', 'like_user_set').all()\n print(self.request.GET)\n query = self.request.GET.get('q', None)\n if query is not None:\n qs = qs.filter(Q(name__icontains=query) | Q(\n category_body__name__icontains=query))\n return qs\n\n\n@login_required\ndef comment_new(request):\n pk = request.POST.get('pk')\n pill = get_object_or_404(Pills, pk=pk)\n form = CommentForm\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.pills = pill\n comment.save()\n return render(request, 'pills/comment_new_ajax.html', {\n 'comment': comment, 'form': form})\n return redirect('pills:pill_list')\n\n\n@login_required\ndef comment_delete(request, pill_pk, pk):\n comment = get_object_or_404(Comment, pk=pk)\n if request.method == 'POST' and request.user == comment.author:\n comment.delete()\n messages.success(request, '삭제했습니다.')\n return redirect('pills:pill_list')\n messages.warning('권한이 없습니다.')\n return redirect('pills:pill_list')\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass PillListView(ListView):\n model = Pills\n template_name = 'pills/pill_list.html'\n form_class = CommentForm\n\n def get_context_data(self, **kwargs):\n context = super(PillListView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self, *args, **kwargs):\n qs = Pills.objects.prefetch_related('category_body',\n 'category_gender', 'like_user_set').all()\n print(self.request.GET)\n query = self.request.GET.get('q', None)\n if query is not None:\n qs = qs.filter(Q(name__icontains=query) | Q(\n category_body__name__icontains=query))\n return qs\n\n\n@login_required\ndef comment_new(request):\n pk = request.POST.get('pk')\n pill = get_object_or_404(Pills, pk=pk)\n form = CommentForm\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.pills = pill\n 
comment.save()\n return render(request, 'pills/comment_new_ajax.html', {\n 'comment': comment, 'form': form})\n return redirect('pills:pill_list')\n\n\n@login_required\ndef comment_delete(request, pill_pk, pk):\n comment = get_object_or_404(Comment, pk=pk)\n if request.method == 'POST' and request.user == comment.author:\n comment.delete()\n messages.success(request, '삭제했습니다.')\n return redirect('pills:pill_list')\n messages.warning('권한이 없습니다.')\n return redirect('pills:pill_list')\n\n\nclass PillDetailView(DetailView):\n model = Pills\n template_name = 'pills/pill_detail.html'\n\n\n@login_required\n@require_POST\ndef pill_like(request):\n pk = request.POST.get('pk', None)\n pill = get_object_or_404(Pills, pk=pk)\n pill_like, pill_like_created = pill.like_set.get_or_create(user=request\n .user)\n if not pill_like_created:\n pill_like.delete()\n message = '좋아요 취소'\n else:\n message = '좋아요'\n context = {'like_count': pill.like_count, 'message': message,\n 'username': request.user.username}\n return HttpResponse(json.dumps(context))\n", "step-5": "from django.db.models import Q\nfrom django.contrib import messages\n\nfrom django.views.generic import ListView, DetailView\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\n\nfrom django.views.decorators.http import require_POST\n\nfrom .models import Pills, Like, Comment\nfrom .forms import CommentForm\nimport json\n\n\nclass PillListView(ListView):\n\tmodel = Pills\n\ttemplate_name = \"pills/pill_list.html\"\n\tform_class = CommentForm\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(PillListView, self).get_context_data(**kwargs)\n\t\treturn context\n\n\tdef get_queryset(self, *args, **kwargs):\n\t\tqs = Pills.objects.prefetch_related('category_body','category_gender','like_user_set').all()\n\t\tprint(self.request.GET)\n\t\tquery = self.request.GET.get(\"q\", None)\n\t\tif query is not None:\n\t\t\tqs = qs.filter(\n\t\t\t\t\tQ(name__icontains=query) | Q(category_body__name__icontains=query)\n\t\t\t\t)\n\t\treturn qs\n\n\n# def PillCategory_SearchList(request):\n# \tqs = Pills.objects.prefetch_related('category_body').all()\n# \tquery = self.request.GET.get(\"q\", None)\n# \tif query is not None:\n# \t\tqs = qs.filter(\n# \t\t\t\tQ(name__icontains=query)\n# \t\t\t\t)\n# \t\treturn qs\n\n# \tcontext = {\n# \t\t\t\t'qs' : qs,\n\n# \t}\n\n# \treturn render(request, \"categorysearch.html\", context)\n\n\n\n\n\n@login_required\ndef comment_new(request):\n\tpk = request.POST.get('pk')\n\tpill = get_object_or_404(Pills, pk=pk)\n\tform = CommentForm\n\tif request.method == 'POST':\n\t\tform = CommentForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcomment = form.save(commit=False)\n\t\t\tcomment.author = request.user\n\t\t\tcomment.pills = pill\n\t\t\tcomment.save()\n\t\t\treturn render(request, 'pills/comment_new_ajax.html', {'comment':comment, 'form':form,})\n\treturn redirect(\"pills:pill_list\")\n\n\n@login_required\ndef comment_delete(request, pill_pk, pk):\n\tcomment = get_object_or_404(Comment, pk=pk)\n\tif request.method == 'POST' and request.user == comment.author:\n\t\tcomment.delete()\n\t\tmessages.success(request, '삭제했습니다.')\n\t\treturn redirect('pills:pill_list')\n\n\tmessages.warning('권한이 없습니다.')\n\treturn redirect('pills:pill_list')\n\n\n\nclass PillDetailView(DetailView):\n\tmodel = Pills\n\ttemplate_name = 'pills/pill_detail.html'\n\t# context_object_name = 'pills'\n\n\n@login_required\n@require_POST\t# 
POST method만 받음\ndef pill_like(request):\n\tpk = request.POST.get('pk', None)\n\tpill = get_object_or_404(Pills, pk=pk)\n\n\tpill_like, pill_like_created = pill.like_set.get_or_create(user=request.user)\n\n\tif not pill_like_created:\n\t\tpill_like.delete()\n\t\tmessage = \"좋아요 취소\"\n\telse:\n\t\tmessage = \"좋아요\"\n\n\tcontext = {\n\t\t\t\t'like_count': pill.like_count,\n\t\t\t\t'message': message,\n\t\t\t\t'username': request.user.username\n\t}\n\n\treturn HttpResponse(json.dumps(context))\n\n\n\n\n\n\n", "step-ids": [ 3, 6, 8, 9, 11 ] }
[ 3, 6, 8, 9, 11 ]
try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup def find_packages(): return ['sqlpython'] classifiers = """Development Status :: 4 - Beta Intended Audience :: Information Technology License :: OSI Approved :: MIT License Programming Language :: Python Programming Language :: SQL Topic :: Database :: Front-Ends Operating System :: OS Independent""".splitlines() setup(name="sqlpython", version="1.7.3", description="Command-line interface to Oracle", long_description="Customizable alternative to Oracle's SQL*PLUS command-line interface", author="Luca Canali", author_email="[email protected]", url="http://packages.python.org/sqlpython", packages=find_packages(), include_package_data=True, install_requires=['pyparsing','cmd2==0.6.3','gerald>=0.4.1.1', 'genshi==0.6'], extras_require = { 'oracle': ['cx_Oracle==6.1'], 'postgres': ['psycopg2'], }, keywords = 'client oracle database', license = 'MIT', platforms = ['any'], entry_points = """ [console_scripts] sqlpython = sqlpython.mysqlpy:run editplot_sqlpython = sqlpython.editplot.bash""" )
normal
{ "blob_id": "f960c95afe1f7a161e0144bb523bfaca117ae61e", "index": 2260, "step-1": "<mask token>\n", "step-2": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\n def find_packages():\n return ['sqlpython']\n<mask token>\nsetup(name='sqlpython', version='1.7.3', description=\n 'Command-line interface to Oracle', long_description=\n \"Customizable alternative to Oracle's SQL*PLUS command-line interface\",\n author='Luca Canali', author_email='[email protected]', url=\n 'http://packages.python.org/sqlpython', packages=find_packages(),\n include_package_data=True, install_requires=['pyparsing', 'cmd2==0.6.3',\n 'gerald>=0.4.1.1', 'genshi==0.6'], extras_require={'oracle': [\n 'cx_Oracle==6.1'], 'postgres': ['psycopg2']}, keywords=\n 'client oracle database', license='MIT', platforms=['any'],\n entry_points=\n \"\"\"\n [console_scripts]\n sqlpython = sqlpython.mysqlpy:run\n editplot_sqlpython = sqlpython.editplot.bash\"\"\"\n )\n", "step-3": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\n def find_packages():\n return ['sqlpython']\nclassifiers = (\n \"\"\"Development Status :: 4 - Beta\nIntended Audience :: Information Technology\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: SQL\nTopic :: Database :: Front-Ends\nOperating System :: OS Independent\"\"\"\n .splitlines())\nsetup(name='sqlpython', version='1.7.3', description=\n 'Command-line interface to Oracle', long_description=\n \"Customizable alternative to Oracle's SQL*PLUS command-line interface\",\n author='Luca Canali', author_email='[email protected]', url=\n 'http://packages.python.org/sqlpython', packages=find_packages(),\n include_package_data=True, install_requires=['pyparsing', 'cmd2==0.6.3',\n 'gerald>=0.4.1.1', 'genshi==0.6'], extras_require={'oracle': [\n 'cx_Oracle==6.1'], 'postgres': ['psycopg2']}, keywords=\n 'client oracle database', license='MIT', platforms=['any'],\n entry_points=\n \"\"\"\n [console_scripts]\n sqlpython = sqlpython.mysqlpy:run\n editplot_sqlpython = sqlpython.editplot.bash\"\"\"\n )\n", "step-4": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n def find_packages():\n return ['sqlpython']\n \nclassifiers = \"\"\"Development Status :: 4 - Beta\nIntended Audience :: Information Technology\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: SQL\nTopic :: Database :: Front-Ends\nOperating System :: OS Independent\"\"\".splitlines()\n\nsetup(name=\"sqlpython\",\n version=\"1.7.3\",\n description=\"Command-line interface to Oracle\",\n long_description=\"Customizable alternative to Oracle's SQL*PLUS command-line interface\",\n author=\"Luca Canali\",\n author_email=\"[email protected]\",\n url=\"http://packages.python.org/sqlpython\",\n packages=find_packages(),\n include_package_data=True, \n install_requires=['pyparsing','cmd2==0.6.3','gerald>=0.4.1.1',\n 'genshi==0.6'],\n extras_require = {\n 'oracle': ['cx_Oracle==6.1'],\n 'postgres': ['psycopg2'],\n },\n keywords = 'client oracle database',\n license = 'MIT',\n platforms = ['any'],\n entry_points = \"\"\"\n [console_scripts]\n sqlpython = sqlpython.mysqlpy:run\n editplot_sqlpython = sqlpython.editplot.bash\"\"\" \n )\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """BatchNorm (BN) utility functions and custom batch-size BN implementations""" from functools import partial import torch import torch.nn as nn from pytorchvideo.layers.batch_norm import ( NaiveSyncBatchNorm1d, NaiveSyncBatchNorm3d, ) # noqa def get_norm(cfg): """ Args: cfg (CfgNode): model building configs, details are in the comments of the config file. Returns: nn.Module: the normalization layer. """ if cfg.BN.NORM_TYPE in {"batchnorm", "sync_batchnorm_apex"}: return nn.BatchNorm3d elif cfg.BN.NORM_TYPE == "sub_batchnorm": return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS) elif cfg.BN.NORM_TYPE == "sync_batchnorm": return partial( NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.NUM_SYNC_DEVICES, global_sync=cfg.BN.GLOBAL_SYNC, ) else: raise NotImplementedError( "Norm type {} is not supported".format(cfg.BN.NORM_TYPE) ) class SubBatchNorm3d(nn.Module): """ The standard BN layer computes stats across all examples in a GPU. In some cases it is desirable to compute stats across only a subset of examples (e.g., in multigrid training https://arxiv.org/abs/1912.00998). SubBatchNorm3d splits the batch dimension into N splits, and run BN on each of them separately (so that the stats are computed on each subset of examples (1/N of batch) independently. During evaluation, it aggregates the stats from all splits into one BN. """ def __init__(self, num_splits, **args): """ Args: num_splits (int): number of splits. args (list): other arguments. """ super(SubBatchNorm3d, self).__init__() self.num_splits = num_splits num_features = args["num_features"] # Keep only one set of weight and bias. if args.get("affine", True): self.affine = True args["affine"] = False self.weight = torch.nn.Parameter(torch.ones(num_features)) self.bias = torch.nn.Parameter(torch.zeros(num_features)) else: self.affine = False self.bn = nn.BatchNorm3d(**args) args["num_features"] = num_features * num_splits self.split_bn = nn.BatchNorm3d(**args) def _get_aggregated_mean_std(self, means, stds, n): """ Calculate the aggregated mean and stds. Args: means (tensor): mean values. stds (tensor): standard deviations. n (int): number of sets of means and stds. """ mean = means.view(n, -1).sum(0) / n std = ( stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n ) return mean.detach(), std.detach() def aggregate_stats(self): """ Synchronize running_mean, and running_var. Call this before eval. """ if self.split_bn.track_running_stats: ( self.bn.running_mean.data, self.bn.running_var.data, ) = self._get_aggregated_mean_std( self.split_bn.running_mean, self.split_bn.running_var, self.num_splits, ) def forward(self, x): if self.training: n, c, t, h, w = x.shape x = x.view(n // self.num_splits, c * self.num_splits, t, h, w) x = self.split_bn(x) x = x.view(n, c, t, h, w) else: x = self.bn(x) if self.affine: x = x * self.weight.view((-1, 1, 1, 1)) x = x + self.bias.view((-1, 1, 1, 1)) return x
normal
{ "blob_id": "4e5e1be289b32655736d8c6c02d354a85d4268b7", "index": 3027, "step-1": "<mask token>\n\n\nclass SubBatchNorm3d(nn.Module):\n <mask token>\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n", "step-2": "<mask token>\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. 
Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n", "step-3": "<mask token>\n\n\ndef get_norm(cfg):\n \"\"\"\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n \"\"\"\n if cfg.BN.NORM_TYPE in {'batchnorm', 'sync_batchnorm_apex'}:\n return nn.BatchNorm3d\n elif cfg.BN.NORM_TYPE == 'sub_batchnorm':\n return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)\n elif cfg.BN.NORM_TYPE == 'sync_batchnorm':\n return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.\n NUM_SYNC_DEVICES, global_sync=cfg.BN.GLOBAL_SYNC)\n else:\n raise NotImplementedError('Norm type {} is not supported'.format(\n cfg.BN.NORM_TYPE))\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. 
Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n", "step-4": "<mask token>\nfrom functools import partial\nimport torch\nimport torch.nn as nn\nfrom pytorchvideo.layers.batch_norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm3d\n\n\ndef get_norm(cfg):\n \"\"\"\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n \"\"\"\n if cfg.BN.NORM_TYPE in {'batchnorm', 'sync_batchnorm_apex'}:\n return nn.BatchNorm3d\n elif cfg.BN.NORM_TYPE == 'sub_batchnorm':\n return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)\n elif cfg.BN.NORM_TYPE == 'sync_batchnorm':\n return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.\n NUM_SYNC_DEVICES, global_sync=cfg.BN.GLOBAL_SYNC)\n else:\n raise NotImplementedError('Norm type {} is not supported'.format(\n cfg.BN.NORM_TYPE))\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args['num_features']\n if args.get('affine', True):\n self.affine = True\n args['affine'] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args['num_features'] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2\n ).view(n, -1).sum(0) / n\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. 
Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n self.bn.running_mean.data, self.bn.running_var.data = (self.\n _get_aggregated_mean_std(self.split_bn.running_mean, self.\n split_bn.running_var, self.num_splits))\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n", "step-5": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\"\"\"BatchNorm (BN) utility functions and custom batch-size BN implementations\"\"\"\n\nfrom functools import partial\nimport torch\nimport torch.nn as nn\n\nfrom pytorchvideo.layers.batch_norm import (\n NaiveSyncBatchNorm1d,\n NaiveSyncBatchNorm3d,\n) # noqa\n\n\ndef get_norm(cfg):\n \"\"\"\n Args:\n cfg (CfgNode): model building configs, details are in the comments of\n the config file.\n Returns:\n nn.Module: the normalization layer.\n \"\"\"\n if cfg.BN.NORM_TYPE in {\"batchnorm\", \"sync_batchnorm_apex\"}:\n return nn.BatchNorm3d\n elif cfg.BN.NORM_TYPE == \"sub_batchnorm\":\n return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)\n elif cfg.BN.NORM_TYPE == \"sync_batchnorm\":\n return partial(\n NaiveSyncBatchNorm3d,\n num_sync_devices=cfg.BN.NUM_SYNC_DEVICES,\n global_sync=cfg.BN.GLOBAL_SYNC,\n )\n else:\n raise NotImplementedError(\n \"Norm type {} is not supported\".format(cfg.BN.NORM_TYPE)\n )\n\n\nclass SubBatchNorm3d(nn.Module):\n \"\"\"\n The standard BN layer computes stats across all examples in a GPU. In some\n cases it is desirable to compute stats across only a subset of examples\n (e.g., in multigrid training https://arxiv.org/abs/1912.00998).\n SubBatchNorm3d splits the batch dimension into N splits, and run BN on\n each of them separately (so that the stats are computed on each subset of\n examples (1/N of batch) independently. During evaluation, it aggregates\n the stats from all splits into one BN.\n \"\"\"\n\n def __init__(self, num_splits, **args):\n \"\"\"\n Args:\n num_splits (int): number of splits.\n args (list): other arguments.\n \"\"\"\n super(SubBatchNorm3d, self).__init__()\n self.num_splits = num_splits\n num_features = args[\"num_features\"]\n # Keep only one set of weight and bias.\n if args.get(\"affine\", True):\n self.affine = True\n args[\"affine\"] = False\n self.weight = torch.nn.Parameter(torch.ones(num_features))\n self.bias = torch.nn.Parameter(torch.zeros(num_features))\n else:\n self.affine = False\n self.bn = nn.BatchNorm3d(**args)\n args[\"num_features\"] = num_features * num_splits\n self.split_bn = nn.BatchNorm3d(**args)\n\n def _get_aggregated_mean_std(self, means, stds, n):\n \"\"\"\n Calculate the aggregated mean and stds.\n Args:\n means (tensor): mean values.\n stds (tensor): standard deviations.\n n (int): number of sets of means and stds.\n \"\"\"\n mean = means.view(n, -1).sum(0) / n\n std = (\n stds.view(n, -1).sum(0) / n\n + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n\n )\n return mean.detach(), std.detach()\n\n def aggregate_stats(self):\n \"\"\"\n Synchronize running_mean, and running_var. 
Call this before eval.\n \"\"\"\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )\n\n def forward(self, x):\n if self.training:\n n, c, t, h, w = x.shape\n x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)\n x = self.split_bn(x)\n x = x.view(n, c, t, h, w)\n else:\n x = self.bn(x)\n if self.affine:\n x = x * self.weight.view((-1, 1, 1, 1))\n x = x + self.bias.view((-1, 1, 1, 1))\n return x\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # Copyright © YXC # CreateTime: 2016-03-09 10:06:02 """ Example of functions with arbitrary number arguments """ def optional_argument_func(arg1='', arg2=''): """ Function with two optional arguments """ print("arg1:{0}".format(arg1)) print("arg2:{0}".format(arg2)) def arbitrary_argument_func(*args): """ just use "*" to collect all remaining arguments into a tuple """ numargs = len(args) print("Number of arguments:{0}".format(numargs)) for i, arg in enumerate(args): print("Argument {0} is : {1}".format(i, arg)) if __name__ == "__main__": optional_argument_func("Hello", "World") arbitrary_argument_func() arbitrary_argument_func("hello") arbitrary_argument_func("hello", "world", "again")
normal
{ "blob_id": "061a78650e2abf6a9d1e4796dd349174a8df5cb8", "index": 8747, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print('Number of arguments:{0}'.format(numargs))\n for i, arg in enumerate(args):\n print('Argument {0} is : {1}'.format(i, arg))\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print('Number of arguments:{0}'.format(numargs))\n for i, arg in enumerate(args):\n print('Argument {0} is : {1}'.format(i, arg))\n\n\nif __name__ == '__main__':\n optional_argument_func('Hello', 'World')\n arbitrary_argument_func()\n arbitrary_argument_func('hello')\n arbitrary_argument_func('hello', 'world', 'again')\n", "step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n# Copyright © YXC\n# CreateTime: 2016-03-09 10:06:02\n\n\"\"\"\nExample of functions with arbitrary number arguments\n\"\"\"\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print(\"arg1:{0}\".format(arg1))\n print(\"arg2:{0}\".format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print(\"Number of arguments:{0}\".format(numargs))\n for i, arg in enumerate(args):\n print(\"Argument {0} is : {1}\".format(i, arg))\n\n\nif __name__ == \"__main__\":\n optional_argument_func(\"Hello\", \"World\")\n arbitrary_argument_func()\n arbitrary_argument_func(\"hello\")\n arbitrary_argument_func(\"hello\", \"world\", \"again\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import unittest from nldata.corpora import Telegram import os class TestTelegram(unittest.TestCase): def test_export_iter(self): pass # telegram = Telegram(data_dir) # it = telegram.split("train", n=20) # samples = [s for s in it] # self.assertEqual(len(samples), 20) # list(map(print,samples)) if __name__ == '__main__': unittest.main()
normal
{ "blob_id": "5c1d81c973487f1b091e58a6ccf5947c3f2a7e6d", "index": 1058, "step-1": "<mask token>\n\n\nclass TestTelegram(unittest.TestCase):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "import unittest\nfrom nldata.corpora import Telegram\nimport os\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "import unittest\nfrom nldata.corpora import Telegram\nimport os\n\n\nclass TestTelegram(unittest.TestCase):\n def test_export_iter(self):\n pass\n # telegram = Telegram(data_dir)\n # it = telegram.split(\"train\", n=20)\n # samples = [s for s in it]\n # self.assertEqual(len(samples), 20)\n # list(map(print,samples))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
""" Tests of neo.io.exampleio """ import pathlib import unittest from neo.io.exampleio import ExampleIO # , HAVE_SCIPY from neo.test.iotest.common_io_test import BaseTestIO from neo.test.iotest.tools import get_test_file_full_path from neo.io.proxyobjects import (AnalogSignalProxy, SpikeTrainProxy, EventProxy, EpochProxy) from neo import (AnalogSignal, SpikeTrain) import quantities as pq import numpy as np # This run standart tests, this is mandatory for all IO class TestExampleIO(BaseTestIO, unittest.TestCase, ): ioclass = ExampleIO entities_to_download = [] entities_to_test = [ 'fake1.fake', 'fake2.fake', ] def setUp(self): super().setUp() # ensure fake test files exist before running common tests for entity in self.entities_to_test: full_path = get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir) pathlib.Path(full_path).touch() def tearDown(self) -> None: super().tearDown() for entity in self.entities_to_test: full_path = get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir) pathlib.Path(full_path).unlink(missing_ok=True) # This is the minimal variables that are required # to run the common IO tests. IO specific tests # can be added here and will be run automatically # in addition to the common tests. class Specific_TestExampleIO(unittest.TestCase): def test_read_segment_lazy(self): r = ExampleIO(filename=None) seg = r.read_segment(lazy=True) for ana in seg.analogsignals: assert isinstance(ana, AnalogSignalProxy) ana = ana.load() assert isinstance(ana, AnalogSignal) for st in seg.spiketrains: assert isinstance(st, SpikeTrainProxy) st = st.load() assert isinstance(st, SpikeTrain) seg = r.read_segment(lazy=False) for anasig in seg.analogsignals: assert isinstance(ana, AnalogSignal) self.assertNotEqual(anasig.size, 0) for st in seg.spiketrains: assert isinstance(st, SpikeTrain) self.assertNotEqual(st.size, 0) # annotations assert 'seg_extra_info' in seg.annotations assert seg.name == 'Seg #0 Block #0' for anasig in seg.analogsignals: assert anasig.name is not None for st in seg.spiketrains: assert st.name is not None for ev in seg.events: assert ev.name is not None for ep in seg.epochs: assert ep.name is not None def test_read_block(self): r = ExampleIO(filename=None) bl = r.read_block(lazy=True) #assert len(bl.list_units) == 3 #assert len(bl.channel_indexes) == 1 + 1 # signals grouped + units grouped def test_read_segment_with_time_slice(self): r = ExampleIO(filename=None) seg = r.read_segment(time_slice=None) shape_full = seg.analogsignals[0].shape spikes_full = seg.spiketrains[0] event_full = seg.events[0] t_start, t_stop = 260 * pq.ms, 1.854 * pq.s seg = r.read_segment(time_slice=(t_start, t_stop)) shape_slice = seg.analogsignals[0].shape spikes_slice = seg.spiketrains[0] event_slice = seg.events[0] assert shape_full[0] > shape_slice[0] assert spikes_full.size > spikes_slice.size assert np.all(spikes_slice >= t_start) assert np.all(spikes_slice <= t_stop) assert spikes_slice.t_start == t_start assert spikes_slice.t_stop == t_stop assert event_full.size > event_slice.size assert np.all(event_slice.times >= t_start) assert np.all(event_slice.times <= t_stop) if __name__ == "__main__": unittest.main()
normal
{ "blob_id": "e51c0d8c6430603d989d55a64fdf77f9e1a2397b", "index": 1081, "step-1": "<mask token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n super().setUp()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert 
isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n ioclass = ExampleIO\n entities_to_download = []\n entities_to_test = ['fake1.fake', 'fake2.fake']\n\n def setUp(self):\n super().setUp()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert 
np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<mask token>\n", "step-4": "<mask token>\nimport pathlib\nimport unittest\nfrom neo.io.exampleio import ExampleIO\nfrom neo.test.iotest.common_io_test import BaseTestIO\nfrom neo.test.iotest.tools import get_test_file_full_path\nfrom neo.io.proxyobjects import AnalogSignalProxy, SpikeTrainProxy, EventProxy, EpochProxy\nfrom neo import AnalogSignal, SpikeTrain\nimport quantities as pq\nimport numpy as np\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n ioclass = ExampleIO\n entities_to_download = []\n entities_to_test = ['fake1.fake', 'fake2.fake']\n\n def setUp(self):\n super().setUp()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "\"\"\"\nTests of neo.io.exampleio\n\"\"\"\n\nimport pathlib\nimport unittest\n\nfrom neo.io.exampleio import ExampleIO # , HAVE_SCIPY\nfrom neo.test.iotest.common_io_test import BaseTestIO\nfrom neo.test.iotest.tools import get_test_file_full_path\nfrom neo.io.proxyobjects import 
(AnalogSignalProxy,\n SpikeTrainProxy, EventProxy, EpochProxy)\nfrom neo import (AnalogSignal, SpikeTrain)\n\nimport quantities as pq\nimport numpy as np\n\n\n# This run standart tests, this is mandatory for all IO\nclass TestExampleIO(BaseTestIO, unittest.TestCase, ):\n ioclass = ExampleIO\n entities_to_download = []\n entities_to_test = [\n 'fake1.fake',\n 'fake2.fake',\n ]\n\n def setUp(self):\n super().setUp()\n # ensure fake test files exist before running common tests\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=entity,\n directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) -> None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=entity,\n directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n# This is the minimal variables that are required\n# to run the common IO tests. IO specific tests\n# can be added here and will be run automatically\n# in addition to the common tests.\nclass Specific_TestExampleIO(unittest.TestCase):\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n\n # annotations\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n #assert len(bl.list_units) == 3\n #assert len(bl.channel_indexes) == 1 + 1 # signals grouped + units grouped\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n\n assert shape_full[0] > shape_slice[0]\n\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "step-ids": [ 6, 7, 8, 10, 11 ] }
[ 6, 7, 8, 10, 11 ]
t_dim_2 = [[1, 2], [3, 4]] def z(i, j, dim): t = dim ** 2 if dim == 2: return t_dim_2[i-1][j-1] d = dim//2 if i <= d: # I or II if j <= d: return z(i, j, d) #I else: j -= d return t//4 + z(i, j, d) # II else: # III or IV if j <=d: i -= d return t//2 + z(i, j, d) # III else: i -= d j -= d return 3*t//4 + z(i, j, d) # IV n = 2 i = 3 j = 3 dim = 2**n print(z(i,j,dim))
normal
{ "blob_id": "07ed8c12e8e5c568c897b6b632c48831267eba51", "index": 1815, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i - 1][j - 1]\n d = dim // 2\n if i <= d:\n if j <= d:\n return z(i, j, d)\n else:\n j -= d\n return t // 4 + z(i, j, d)\n elif j <= d:\n i -= d\n return t // 2 + z(i, j, d)\n else:\n i -= d\n j -= d\n return 3 * t // 4 + z(i, j, d)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i - 1][j - 1]\n d = dim // 2\n if i <= d:\n if j <= d:\n return z(i, j, d)\n else:\n j -= d\n return t // 4 + z(i, j, d)\n elif j <= d:\n i -= d\n return t // 2 + z(i, j, d)\n else:\n i -= d\n j -= d\n return 3 * t // 4 + z(i, j, d)\n\n\n<mask token>\nprint(z(i, j, dim))\n", "step-4": "t_dim_2 = [[1, 2], [3, 4]]\n\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i - 1][j - 1]\n d = dim // 2\n if i <= d:\n if j <= d:\n return z(i, j, d)\n else:\n j -= d\n return t // 4 + z(i, j, d)\n elif j <= d:\n i -= d\n return t // 2 + z(i, j, d)\n else:\n i -= d\n j -= d\n return 3 * t // 4 + z(i, j, d)\n\n\nn = 2\ni = 3\nj = 3\ndim = 2 ** n\nprint(z(i, j, dim))\n", "step-5": "\nt_dim_2 = [[1, 2], [3, 4]]\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i-1][j-1]\n\n d = dim//2\n if i <= d: # I or II\n if j <= d:\n return z(i, j, d) #I\n else:\n j -= d\n return t//4 + z(i, j, d) # II\n else: # III or IV\n if j <=d:\n i -= d\n return t//2 + z(i, j, d) # III\n else:\n i -= d\n j -= d\n return 3*t//4 + z(i, j, d) # IV\nn = 2\ni = 3\nj = 3\ndim = 2**n\nprint(z(i,j,dim))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import FWCore.ParameterSet.Config as cms process = cms.Process("GeometryInfo") # minimum of logs process.MessageLogger = cms.Service("MessageLogger", cerr = cms.untracked.PSet( enable = cms.untracked.bool(False) ), cout = cms.untracked.PSet( enable = cms.untracked.bool(True), threshold = cms.untracked.string('INFO') ) ) # geometry process.load("Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi") #process.load("Geometry.VeryForwardGeometry.geometryRPFromDD_2017_cfi") # no events to process process.source = cms.Source("EmptyIOVSource", timetype = cms.string('runnumber'), firstValue = cms.uint64(1), lastValue = cms.uint64(1), interval = cms.uint64(1) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) #Database output service process.load("CondCore.CondDB.CondDB_cfi") # input database (in this case local sqlite file) process.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db' process.PoolDBESSource = cms.ESSource("PoolDBESSource", process.CondDB, DumpStat=cms.untracked.bool(True), toGet = cms.VPSet( cms.PSet( record = cms.string('RPMisalignedAlignmentRecord'), tag = cms.string("CTPPSRPAlignment_misaligned") ) ) ) process.ctppsGeometryInfo = cms.EDAnalyzer("CTPPSGeometryInfo", geometryType = cms.untracked.string("misaligned"), printRPInfo = cms.untracked.bool(True), printSensorInfo = cms.untracked.bool(True) ) process.p = cms.Path( process.ctppsGeometryInfo )
normal
{ "blob_id": "ac0e301e58ea64465ccd4b2b9aa4ae69283d6d0c", "index": 6052, "step-1": "<mask token>\n", "step-2": "<mask token>\nprocess.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')\n<mask token>\nprocess.load('CondCore.CondDB.CondDB_cfi')\n<mask token>\n", "step-3": "<mask token>\nprocess = cms.Process('GeometryInfo')\nprocess.MessageLogger = cms.Service('MessageLogger', cerr=cms.untracked.\n PSet(enable=cms.untracked.bool(False)), cout=cms.untracked.PSet(enable=\n cms.untracked.bool(True), threshold=cms.untracked.string('INFO')))\nprocess.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')\nprocess.source = cms.Source('EmptyIOVSource', timetype=cms.string(\n 'runnumber'), firstValue=cms.uint64(1), lastValue=cms.uint64(1),\n interval=cms.uint64(1))\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))\nprocess.load('CondCore.CondDB.CondDB_cfi')\nprocess.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'\nprocess.PoolDBESSource = cms.ESSource('PoolDBESSource', process.CondDB,\n DumpStat=cms.untracked.bool(True), toGet=cms.VPSet(cms.PSet(record=cms.\n string('RPMisalignedAlignmentRecord'), tag=cms.string(\n 'CTPPSRPAlignment_misaligned'))))\nprocess.ctppsGeometryInfo = cms.EDAnalyzer('CTPPSGeometryInfo',\n geometryType=cms.untracked.string('misaligned'), printRPInfo=cms.\n untracked.bool(True), printSensorInfo=cms.untracked.bool(True))\nprocess.p = cms.Path(process.ctppsGeometryInfo)\n", "step-4": "import FWCore.ParameterSet.Config as cms\nprocess = cms.Process('GeometryInfo')\nprocess.MessageLogger = cms.Service('MessageLogger', cerr=cms.untracked.\n PSet(enable=cms.untracked.bool(False)), cout=cms.untracked.PSet(enable=\n cms.untracked.bool(True), threshold=cms.untracked.string('INFO')))\nprocess.load('Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi')\nprocess.source = cms.Source('EmptyIOVSource', timetype=cms.string(\n 'runnumber'), firstValue=cms.uint64(1), lastValue=cms.uint64(1),\n interval=cms.uint64(1))\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))\nprocess.load('CondCore.CondDB.CondDB_cfi')\nprocess.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'\nprocess.PoolDBESSource = cms.ESSource('PoolDBESSource', process.CondDB,\n DumpStat=cms.untracked.bool(True), toGet=cms.VPSet(cms.PSet(record=cms.\n string('RPMisalignedAlignmentRecord'), tag=cms.string(\n 'CTPPSRPAlignment_misaligned'))))\nprocess.ctppsGeometryInfo = cms.EDAnalyzer('CTPPSGeometryInfo',\n geometryType=cms.untracked.string('misaligned'), printRPInfo=cms.\n untracked.bool(True), printSensorInfo=cms.untracked.bool(True))\nprocess.p = cms.Path(process.ctppsGeometryInfo)\n", "step-5": "import FWCore.ParameterSet.Config as cms\nprocess = cms.Process(\"GeometryInfo\")\n\n# minimum of logs\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n cerr = cms.untracked.PSet(\n enable = cms.untracked.bool(False)\n ),\n cout = cms.untracked.PSet(\n enable = cms.untracked.bool(True),\n threshold = cms.untracked.string('INFO')\n )\n)\n\n# geometry\nprocess.load(\"Geometry.VeryForwardGeometry.geometryRPFromDD_2018_cfi\")\n#process.load(\"Geometry.VeryForwardGeometry.geometryRPFromDD_2017_cfi\")\n\n# no events to process\nprocess.source = cms.Source(\"EmptyIOVSource\",\n timetype = cms.string('runnumber'),\n firstValue = cms.uint64(1),\n lastValue = cms.uint64(1),\n interval = cms.uint64(1)\n)\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1)\n)\n\n#Database output service\nprocess.load(\"CondCore.CondDB.CondDB_cfi\")\n# input database (in 
this case local sqlite file)\nprocess.CondDB.connect = 'sqlite_file:CTPPSRPAlignment.db'\n\nprocess.PoolDBESSource = cms.ESSource(\"PoolDBESSource\",\n process.CondDB,\n DumpStat=cms.untracked.bool(True),\n toGet = cms.VPSet(\n cms.PSet(\n record = cms.string('RPMisalignedAlignmentRecord'),\n tag = cms.string(\"CTPPSRPAlignment_misaligned\")\n )\n )\n)\n\nprocess.ctppsGeometryInfo = cms.EDAnalyzer(\"CTPPSGeometryInfo\",\n geometryType = cms.untracked.string(\"misaligned\"),\n printRPInfo = cms.untracked.bool(True),\n printSensorInfo = cms.untracked.bool(True)\n)\n\nprocess.p = cms.Path(\n process.ctppsGeometryInfo\n)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python
# coding: utf-8

from os.path import dirname, abspath

PICKITEMSP = True
RAREP = True
REPAIRP = False

ITEMS = {
    "legendary": ["#02CE01",  # set
                  "#BF642F"],  # legndary
    "rare": ["#BBBB00"]
}

current_abpath = abspath(dirname(__file__)) + "/"
# With py2exe the dirname is INSTPATH/server/library.zip. So
# current_abpath will be INSTPATH/server/library.zip/
if current_abpath[-12:] == "library.zip/":
    current_abpath = current_abpath[:-12]

imgs_dir = current_abpath + "imgs\\"


def get_item_colors():
    '''
    >>> get_item_colors()
    '''
    result = []
    if not PICKITEMSP: return result

    if RAREP:
        for a in ITEMS:
            result += ITEMS[a]
        return result
    else:
        result = ITEMS["legendary"]
        return result
normal
{ "blob_id": "927b42326ad62f5e484fd7016c42a44b93609f83", "index": 1296, "step-1": "<mask token>\n", "step-2": "<mask token>\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\n<mask token>\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n", "step-3": "<mask token>\nPICKITEMSP = True\nRAREP = True\nREPAIRP = False\nITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}\ncurrent_abpath = abspath(dirname(__file__)) + '/'\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\nimgs_dir = current_abpath + 'imgs\\\\'\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n", "step-4": "from os.path import dirname, abspath\nPICKITEMSP = True\nRAREP = True\nREPAIRP = False\nITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}\ncurrent_abpath = abspath(dirname(__file__)) + '/'\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\nimgs_dir = current_abpath + 'imgs\\\\'\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n", "step-5": "#!/usr/bin/python\r\n# coding: utf-8\r\n\r\nfrom os.path import dirname, abspath\r\n\r\nPICKITEMSP = True\r\nRAREP\t = True\r\nREPAIRP = False\r\n\r\nITEMS = {\r\n \"legendary\": [\"#02CE01\", # set\r\n \"#BF642F\"], # legndary\r\n \"rare\":\t [\"#BBBB00\"]\r\n }\r\n\r\ncurrent_abpath = abspath(dirname(__file__)) + \"/\"\r\n# With py2exe the dirname is INSTPATH/server/library.zip. So\r\n# current_abpath will be INSTPATH/server/library.zip/\r\nif current_abpath[-12:] == \"library.zip/\":\r\n current_abpath = current_abpath[:-12]\r\n\r\nimgs_dir = current_abpath + \"imgs\\\\\"\r\n\r\n\r\ndef get_item_colors():\r\n '''\r\n >>> get_item_colors()\r\n '''\r\n result = []\r\n if not PICKITEMSP: return result\r\n \r\n if RAREP:\r\n for a in ITEMS:\r\n result += ITEMS[a]\r\n return result\r\n else:\r\n result = ITEMS[\"legendary\"]\r\n return result\r\n \r\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
import collections
import inspect
import struct

from pygments.token import *

import decompil.builder
import decompil.disassemblers
import decompil.ir


class Context(decompil.ir.Context):

    def __init__(self):
        super(Context, self).__init__(16)
        self.pointer_type = self.create_pointer_type(self.half_type)
        self.init_registers()

    def init_registers(self):
        self.registers = regs = [
            # 0x00-0x03
            Register(self, 'ar0', 16),
            Register(self, 'ar1', 16),
            Register(self, 'ar2', 16),
            Register(self, 'ar3', 16),

            # 0x04-0x07
            Register(self, 'ix0', 16),
            Register(self, 'ix1', 16),
            Register(self, 'ix2', 16),
            Register(self, 'ix3', 16),

            # 0x08-0xb
            Register(self, 'r08', 16),
            Register(self, 'r09', 16),
            Register(self, 'r0a', 16),
            Register(self, 'r0b', 16),

            # 0x0c-0x0f
            # TODO: something special?
            Register(self, 'st0', 16),
            Register(self, 'st1', 16),
            Register(self, 'st2', 16),
            Register(self, 'st3', 16),

            # 0x10-0x11
            # TODO: handle 8-bit overflow
            Register(self, 'ac0.h', 16),
            Register(self, 'ac1.h', 16),

            # 0x12-0x13
            Register(self, 'config', 16),
            Register(self, 'sr', 16),

            # 0x14-0x17
            Register(self, 'prod.l', 16),
            Register(self, 'prod.m1', 16),
            # TODO: handle 8-bit overflow
            Register(self, 'prod.h', 16),
            Register(self, 'prod.m2', 16),

            # 0x18-0x1b
            Register(self, 'ax0.l', 16),
            Register(self, 'ax1.l', 16),
            Register(self, 'ax0.h', 16),
            Register(self, 'ax1.h', 16),

            # 0x1c-0x1f
            Register(self, 'ac0.l', 16),
            Register(self, 'ac1.l', 16),
            Register(self, 'ac0.m', 16),
            Register(self, 'ac1.m', 16),
        ]

        self.wr_registers = [
            Register(self, 'wr{}'.format(i), 16) for i in range(4)
        ]

        self.addr_to_wr = {
            self.registers[0x00]: self.wr_registers[0x00],
            self.registers[0x01]: self.wr_registers[0x01],
            self.registers[0x02]: self.wr_registers[0x02],
            self.registers[0x03]: self.wr_registers[0x03],
        }
        self.addr_to_ix = {
            self.registers[0x00]: self.registers[0x04],
            self.registers[0x01]: self.registers[0x05],
            self.registers[0x02]: self.registers[0x06],
            self.registers[0x03]: self.registers[0x07],
        }

        self.long_accumulators = [
            Register(self, 'ac0', 40, [
                (regs[0x10], 32), (regs[0x1e], 16), (regs[0x1c], 0)
            ]),
            Register(self, 'ac1', 40, [
                (regs[0x11], 32), (regs[0x1f], 16), (regs[0x1d], 0)
            ]),
        ]
        self.short_accumulators = [
            Register(self, 'acs0', 24, [(regs[0x10], 16), (regs[0x1e], 0)]),
            Register(self, 'acs1', 24, [(regs[0x11], 16), (regs[0x1f], 0)]),
        ]
        self.extra_acculumators = [
            Register(self, 'ax0', 32, [(regs[0x1a], 16), (regs[0x18], 0)]),
            Register(self, 'ax1', 32, [(regs[0x1b], 16), (regs[0x19], 0)]),
        ]
        self.prod_register = Register(self, 'prod', 40, [
            (regs[0x17], 16),
            (regs[0x16], 32),
            (regs[0x15], 16),
            (regs[0x14], 0),
        ])


class Register(decompil.ir.Register):
    def __init__(self, context, name, width, components=None):
        self.context = context
        self.type = context.create_int_type(width)
        self.name = name
        self.components = components
        self.registers = (
            [reg for reg, _ in components]
            if components else
            None
        )

    def build_load(self, builder):
        if self.components is None:
            return builder.build_rload(self)
        else:
            result = None
            for reg, shift in self.components:
                val = builder.build_zext(
                    self.type, builder.build_rload(reg)
                )
                if shift:
                    val = builder.build_lshl(val, self.type.create(shift))

                if result:
                    result = builder.build_add(result, val)
                else:
                    result = val
            return result

    def build_store(self, builder, value):
        assert value.type == self.type
        if self.components is None:
            builder.build_rstore(self, value)
        else:
            for reg, shift in self.components:
                if shift:
                    val = builder.build_lshl(value, value.type.create(shift))
                val = builder.build_trunc(reg.type, val)
                builder.build_rstore(reg, val)

    def build_load_comp(self, builder):
        return [
            builder.build_rload(reg)
            for reg, _ in self.components
        ]

    def build_store_comp(self, builder, *values):
        assert len(values) == len(self.components)
        for value, (reg, _) in zip(values, self.components):
            builder.build_rstore(reg, value)

    def format(self):
        return [(Name.Variable, '${}'.format(self.name))]


class BaseDecoder:
    name = None
    opcode = None
    opcode_mask = None
    operands_format = None

    def decode(self, context, disassembler, builder):
        raise NotImplementedError()

    def decode_operands(self, context):
        return [op.extract(context, self) for op in self.operands_format]


class Instruction(BaseDecoder):
    have_extra_operand = False
    is_extended = False

    def __init__(self, address, opcode, extra_operand=None, extension=None):
        self.address = address
        self.opcode_value = opcode
        self.extension = extension
        assert self.is_extended == (extension is not None)
        assert self.have_extra_operand == (extra_operand is not None)
        self.extra_operand = extra_operand
        if self.extension:
            self.extension.instruction = self

    def __repr__(self):
        ext = (
            ' ({})'.format(self.extension.name)
            if self.extension else
            ''
        )
        return '{:04x}: {}{}'.format(
            self.address, self.name, ext
        )


class InstructionExtension(BaseDecoder):
    def __init__(self, opcode):
        self.opcode_value = opcode
        # When accepting an extension, instructions should set the following
        # field:
        self.instruction = None

    def __repr__(self):
        return '{:04x}: {} (extension)'.format(
            self.address, self.name
        )


instructions = []
instruction_extensions = []


def _init_tables():
    import gcdsp.decoders

    def helper(table, cls):
        for obj_name in dir(gcdsp.decoders):
            obj = getattr(gcdsp.decoders, obj_name)
            if not (
                inspect.isclass(obj)
                and issubclass(obj, cls)
                and obj != cls
            ):
                continue
            assert (obj.opcode & ~obj.opcode_mask) == 0
            table.append(obj)

    helper(instructions, Instruction)
    helper(instruction_extensions, InstructionExtension)


_init_tables()


def load_insns():
    import gcdsp.decoders

    def default_decoder(self, context, disassembler, builder):
        builder.build_undef()
        disassembler.stop_basic_block()

    def decode_operands(self, context):
        result = []
        for _, size, addend, rshift, mask in self.operands_format:
            operand = (self.opcode & mask) >> rshift
            result.append(self.opcode & mask + addend)
        return result

    Insn = collections.namedtuple(
        'Insn', 'name opcode mask size unused0 operands is_extended unused1'
    )

    for insn in gcdsp.decoders.opcodes:
        insn = Insn(*insn)
        insn_decoder = getattr(
            gcdsp.decoders,
            'decode_{}'.format(insn.name.lower()),
            default_decoder,
        )
        instructions.append(
            type(insn.name, (Instruction, ), {
                'name': insn.name,
                'opcode': insn.opcode,
                'opcode_mask': insn.mask,
                'have_extra_operand': insn.size == 2,
                'is_extended': insn.is_extended,
                'decode': insn_decoder,
                'decode_operands': decode_operands,
                'operands_format': insn.operands
            })
        )

    for ext in gcdsp.decoders.opcodes_ext:
        ext = Insn(*ext)
        instruction_extensions.append(
            type(ext.name, (InstructionExtension, ), {
                'name': ext.name,
                'opcode': ext.opcode,
                'opcode_mask': ext.mask,
                'decode': insn_decoder,
                'decode_operands': decode_operands,
                'operands_format': insn.operands
            })
        )


load_insns()


class Decoder(decompil.disassemblers.BaseDecoder):

    def __init__(self, fp):
        self.fp = fp

    def parse_insn(self, disassembler, builder, address):
        opcode = self.get_word(address)
        next_address = address + 1
        if opcode is None:
            return None
        insn_pat = self.lookup(opcode, instructions)

        # Parse the extra operand, if any.
        if insn_pat.have_extra_operand:
            extra_operand = self.get_word(address + 1)
            next_address += 1
            if extra_operand is None:
                raise ValueError('Incomplete file')
        else:
            extra_operand = None

        # Parse the instruction extension, if any.
        if insn_pat.is_extended:
            ext_pat = self.lookup(opcode, instruction_extensions)
            ext = ext_pat(opcode)
        else:
            ext = None

        insn = insn_pat(address, opcode, extra_operand, ext)
        insn_image = '{}{}'.format(
            insn.name,
            "'{}".format(insn.extension.name) if insn.is_extended else ''
        )
        builder.set_origin('At {:#04x}: {}'.format(address, insn_image))

        # Always decode the extension first (if any).
        if insn.is_extended:
            insn.extension.decode(disassembler.context, disassembler, builder)
            # TODO: remove this once all extensions are supported.
            if disassembler.must_stop_basic_block:
                return next_address
        insn.decode(disassembler.context, disassembler, builder)

        return next_address

    def iter_insns(self, address):
        while True:
            address, insn = self.parse_insn(address)
            if insn is None:
                break
            else:
                yield address, insn

    def get_word(self, address):
        self.fp.seek(2 * address)
        word = self.fp.read(2)
        if len(word) == 0:
            return None
        elif len(word) == 2:
            return struct.unpack('>H', word)[0]
        else:
            raise ValueError('Incomplete file')

    def lookup(self, opcode, pattern_set):
        for pat in pattern_set:
            if opcode & pat.opcode_mask == pat.opcode:
                return pat
        else:
            raise ValueError('Invalid opcode: {:04x}'.format(opcode))
normal
{ "blob_id": "865d7c606b287dbce158f721c6cf768cd078eb48", "index": 9231, "step-1": "<mask token>\n\n\nclass Register(decompil.ir.Register):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n", "step-2": "<mask token>\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = 
builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def 
lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n", "step-3": "<mask token>\n\n\nclass Context(decompil.ir.Context):\n <mask token>\n <mask token>\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, 
\"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n", "step-4": "<mask token>\n\n\nclass Context(decompil.ir.Context):\n\n def __init__(self):\n super(Context, self).__init__(16)\n self.pointer_type = self.create_pointer_type(self.half_type)\n self.init_registers()\n\n def init_registers(self):\n self.registers = regs = [Register(self, 'ar0', 16), Register(self,\n 'ar1', 16), Register(self, 'ar2', 16), Register(self, 'ar3', 16\n ), Register(self, 'ix0', 16), Register(self, 'ix1', 16),\n Register(self, 'ix2', 16), Register(self, 'ix3', 16), Register(\n self, 'r08', 16), Register(self, 'r09', 16), Register(self,\n 'r0a', 16), Register(self, 'r0b', 16), Register(self, 'st0', 16\n ), Register(self, 'st1', 16), Register(self, 'st2', 16),\n Register(self, 'st3', 16), Register(self, 'ac0.h', 16),\n Register(self, 'ac1.h', 16), Register(self, 'config', 16),\n Register(self, 'sr', 16), Register(self, 'prod.l', 16),\n Register(self, 'prod.m1', 16), Register(self, 'prod.h', 16),\n Register(self, 'prod.m2', 16), Register(self, 'ax0.l', 16),\n Register(self, 'ax1.l', 16), Register(self, 'ax0.h', 16),\n Register(self, 'ax1.h', 16), Register(self, 'ac0.l', 16),\n Register(self, 'ac1.l', 16), Register(self, 'ac0.m', 16),\n Register(self, 'ac1.m', 16)]\n self.wr_registers = [Register(self, 'wr{}'.format(i), 16) for i in\n range(4)]\n self.addr_to_wr = {self.registers[0]: self.wr_registers[0], self.\n registers[1]: self.wr_registers[1], self.registers[2]: self.\n wr_registers[2], self.registers[3]: self.wr_registers[3]}\n self.addr_to_ix = {self.registers[0]: self.registers[4], self.\n registers[1]: self.registers[5], self.registers[2]: self.\n registers[6], self.registers[3]: self.registers[7]}\n self.long_accumulators = [Register(self, 'ac0', 40, [(regs[16], 32),\n (regs[30], 16), (regs[28], 0)]), Register(self, 'ac1', 40, [(\n regs[17], 32), (regs[31], 16), (regs[29], 0)])]\n self.short_accumulators = [Register(self, 'acs0', 24, [(regs[16], \n 16), (regs[30], 0)]), Register(self, 'acs1', 24, [(regs[17], 16\n ), (regs[31], 0)])]\n self.extra_acculumators = [Register(self, 'ax0', 32, [(regs[26], 16\n ), (regs[24], 0)]), Register(self, 'ax1', 32, [(regs[27], 16),\n (regs[25], 0)])]\n self.prod_register = Register(self, 'prod', 40, [(regs[23], 16), (\n regs[22], 32), (regs[21], 16), (regs[20], 0)])\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if 
self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\ndef _init_tables():\n import gcdsp.decoders\n\n def helper(table, cls):\n for obj_name in dir(gcdsp.decoders):\n obj = getattr(gcdsp.decoders, obj_name)\n if not (inspect.isclass(obj) and issubclass(obj, cls) and obj !=\n cls):\n continue\n assert obj.opcode & ~obj.opcode_mask == 0\n table.append(obj)\n helper(instructions, Instruction)\n helper(instruction_extensions, InstructionExtension)\n\n\n<mask token>\n\n\ndef load_insns():\n import gcdsp.decoders\n\n def default_decoder(self, context, disassembler, builder):\n builder.build_undef()\n disassembler.stop_basic_block()\n\n def decode_operands(self, context):\n result = []\n for _, size, addend, rshift, mask in self.operands_format:\n operand = (self.opcode & mask) >> rshift\n result.append(self.opcode & mask + addend)\n return result\n Insn = collections.namedtuple('Insn',\n 'name opcode mask size unused0 operands is_extended unused1')\n for insn in gcdsp.decoders.opcodes:\n insn = Insn(*insn)\n insn_decoder = getattr(gcdsp.decoders, 'decode_{}'.format(insn.name\n .lower()), default_decoder)\n instructions.append(type(insn.name, (Instruction,), {'name': insn.\n name, 'opcode': insn.opcode, 'opcode_mask': insn.mask,\n 'have_extra_operand': insn.size == 2, 'is_extended': insn.\n is_extended, 'decode': insn_decoder, 'decode_operands':\n decode_operands, 'operands_format': 
insn.operands}))\n for ext in gcdsp.decoders.opcodes_ext:\n ext = Insn(*ext)\n instruction_extensions.append(type(ext.name, (InstructionExtension,\n ), {'name': ext.name, 'opcode': ext.opcode, 'opcode_mask': ext.\n mask, 'decode': insn_decoder, 'decode_operands':\n decode_operands, 'operands_format': insn.operands}))\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n", "step-5": "import collections\nimport inspect\nimport struct\n\nfrom pygments.token import *\n\nimport decompil.builder\nimport decompil.disassemblers\nimport decompil.ir\n\n\nclass Context(decompil.ir.Context):\n\n def __init__(self):\n super(Context, self).__init__(16)\n self.pointer_type = self.create_pointer_type(self.half_type)\n self.init_registers()\n\n def init_registers(self):\n self.registers = regs = [\n # 0x00-0x03\n Register(self, 'ar0', 16),\n Register(self, 'ar1', 16),\n Register(self, 'ar2', 16),\n Register(self, 'ar3', 16),\n\n # 0x04-0x07\n Register(self, 'ix0', 16),\n Register(self, 'ix1', 16),\n Register(self, 'ix2', 16),\n Register(self, 'ix3', 16),\n\n # 0x08-0xb\n Register(self, 'r08', 16),\n Register(self, 'r09', 16),\n Register(self, 'r0a', 16),\n Register(self, 'r0b', 16),\n\n # 0x0c-0x0f\n # TODO: something special?\n Register(self, 'st0', 16),\n Register(self, 'st1', 16),\n Register(self, 'st2', 16),\n Register(self, 'st3', 16),\n\n # 0x10-0x11\n # TODO: handle 8-bit overflow\n Register(self, 'ac0.h', 16),\n Register(self, 'ac1.h', 16),\n\n # 0x12-0x13\n Register(self, 'config', 16),\n Register(self, 'sr', 16),\n\n # 0x14-0x17\n Register(self, 'prod.l', 16),\n Register(self, 'prod.m1', 16),\n # TODO: handle 8-bit overflow\n Register(self, 'prod.h', 16),\n Register(self, 'prod.m2', 16),\n\n # 0x18-0x1b\n Register(self, 'ax0.l', 16),\n Register(self, 'ax1.l', 16),\n Register(self, 'ax0.h', 16),\n Register(self, 'ax1.h', 16),\n\n # 0x1c-0x1f\n Register(self, 'ac0.l', 16),\n Register(self, 'ac1.l', 
16),\n Register(self, 'ac0.m', 16),\n Register(self, 'ac1.m', 16),\n ]\n\n self.wr_registers = [\n Register(self, 'wr{}'.format(i), 16) for i in range(4)\n ]\n\n self.addr_to_wr = {\n self.registers[0x00]: self.wr_registers[0x00],\n self.registers[0x01]: self.wr_registers[0x01],\n self.registers[0x02]: self.wr_registers[0x02],\n self.registers[0x03]: self.wr_registers[0x03],\n }\n self.addr_to_ix = {\n self.registers[0x00]: self.registers[0x04],\n self.registers[0x01]: self.registers[0x05],\n self.registers[0x02]: self.registers[0x06],\n self.registers[0x03]: self.registers[0x07],\n }\n\n self.long_accumulators = [\n Register(self, 'ac0', 40, [\n (regs[0x10], 32), (regs[0x1e], 16), (regs[0x1c], 0)\n ]),\n Register(self, 'ac1', 40, [\n (regs[0x11], 32), (regs[0x1f], 16), (regs[0x1d], 0)\n ]),\n ]\n self.short_accumulators = [\n Register(self, 'acs0', 24, [(regs[0x10], 16), (regs[0x1e], 0)]),\n Register(self, 'acs1', 24, [(regs[0x11], 16), (regs[0x1f], 0)]),\n ]\n self.extra_acculumators = [\n Register(self, 'ax0', 32, [(regs[0x1a], 16), (regs[0x18], 0)]),\n Register(self, 'ax1', 32, [(regs[0x1b], 16), (regs[0x19], 0)]),\n ]\n self.prod_register = Register(self, 'prod', 40, [\n (regs[0x17], 16),\n (regs[0x16], 32),\n (regs[0x15], 16),\n (regs[0x14], 0),\n ])\n\n\nclass Register(decompil.ir.Register):\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = (\n [reg for reg, _ in components]\n if components else\n None\n )\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(\n self.type, builder.build_rload(reg)\n )\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [\n builder.build_rload(reg)\n for reg, _ in self.components\n ]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = (\n ' ({})'.format(self.extension.name)\n if self.extension else\n ''\n )\n return 
'{:04x}: {}{}'.format(\n self.address, self.name, ext\n )\n\n\nclass InstructionExtension(BaseDecoder):\n def __init__(self, opcode):\n self.opcode_value = opcode\n # When accepting an extension, instructions should set the following\n # field:\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(\n self.address, self.name\n )\n\n\ninstructions = []\ninstruction_extensions = []\ndef _init_tables():\n import gcdsp.decoders\n\n def helper(table, cls):\n for obj_name in dir(gcdsp.decoders):\n obj = getattr(gcdsp.decoders, obj_name)\n if not (\n inspect.isclass(obj)\n and issubclass(obj, cls)\n and obj != cls\n ):\n continue\n assert (obj.opcode & ~obj.opcode_mask) == 0\n table.append(obj)\n\n helper(instructions, Instruction)\n helper(instruction_extensions, InstructionExtension)\n_init_tables()\n\n\ndef load_insns():\n import gcdsp.decoders\n\n def default_decoder(self, context, disassembler, builder):\n builder.build_undef()\n disassembler.stop_basic_block()\n\n def decode_operands(self, context):\n result = []\n for _, size, addend, rshift, mask in self.operands_format:\n operand = (self.opcode & mask) >> rshift\n result.append(self.opcode & mask + addend)\n return result\n\n Insn = collections.namedtuple(\n 'Insn', 'name opcode mask size unused0 operands is_extended unused1'\n )\n\n for insn in gcdsp.decoders.opcodes:\n insn = Insn(*insn)\n insn_decoder = getattr(\n gcdsp.decoders,\n 'decode_{}'.format(insn.name.lower()),\n default_decoder,\n )\n instructions.append(\n type(insn.name, (Instruction, ), {\n 'name': insn.name,\n 'opcode': insn.opcode,\n 'opcode_mask': insn.mask,\n 'have_extra_operand': insn.size == 2,\n 'is_extended': insn.is_extended,\n 'decode': insn_decoder,\n 'decode_operands': decode_operands,\n 'operands_format': insn.operands\n })\n )\n\n for ext in gcdsp.decoders.opcodes_ext:\n ext = Insn(*ext)\n instruction_extensions.append(\n type(ext.name, (InstructionExtension, ), {\n 'name': ext.name,\n 'opcode': ext.opcode,\n 'opcode_mask': ext.mask,\n 'decode': insn_decoder,\n 'decode_operands': decode_operands,\n 'operands_format': insn.operands\n })\n )\nload_insns()\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n\n # Parse the extra operand, if any.\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n\n # Parse the instruction extension, if any.\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(\n insn.name,\n \"'{}\".format(insn.extension.name) if insn.is_extended else ''\n )\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n\n # Always decode the extension first (if any).\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n # TODO: remove this once all extensions are supported.\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if 
insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n", "step-ids": [ 18, 24, 25, 29, 33 ] }
[ 18, 24, 25, 29, 33 ]
# -*- coding: utf-8 -*-
from __future__ import print_function

"""phy main CLI tool.

Usage:

    phy --help

"""

#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------

import sys
import os.path as op
import argparse
from textwrap import dedent

import numpy as np
from six import exec_, string_types


#------------------------------------------------------------------------------
# Parser utilities
#------------------------------------------------------------------------------

class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    pass


class Parser(argparse.ArgumentParser):
    def error(self, message):
        sys.stderr.write(message + '\n\n')
        self.print_help()
        sys.exit(2)


_examples = dedent("""

examples:
  phy -v                display the version of phy
  phy download hybrid_120sec.dat -o data/
                        download a sample raw data file in `data/`
  phy describe my_file.kwik
                        display information about a Kwik dataset
  phy spikesort my_params.prm
                        run the whole suite (spike detection and clustering)
  phy detect my_params.prm
                        run spike detection on a parameters file
  phy cluster-auto my_file.kwik
                        run klustakwik on a dataset (after spike detection)
  phy cluster-manual my_file.kwik
                        run the manual clustering GUI

""")


#------------------------------------------------------------------------------
# Parser creator
#------------------------------------------------------------------------------

class ParserCreator(object):
    def __init__(self):
        self.create_main()
        self.create_download()
        self.create_traces()
        self.create_describe()
        self.create_spikesort()
        self.create_detect()
        self.create_auto()
        self.create_manual()
        self.create_notebook()

    @property
    def parser(self):
        return self._parser

    def _add_sub_parser(self, name, desc):
        p = self._subparsers.add_parser(name, help=desc, description=desc)
        self._add_options(p)
        return p

    def _add_options(self, parser):
        parser.add_argument('--debug', '-d', action='store_true',
                            help='activate debug logging mode')
        parser.add_argument('--hide-traceback', action='store_true',
                            help='hide the traceback for cleaner error '
                                 'messages')
        parser.add_argument('--profiler', '-p', action='store_true',
                            help='activate the profiler')
        parser.add_argument('--line-profiler', '-lp',
                            dest='line_profiler',
                            action='store_true',
                            help='activate the line-profiler -- you '
                                 'need to decorate the functions '
                                 'to profile with `@profile` '
                                 'in the code')
        parser.add_argument('--ipython', '-i', action='store_true',
                            help='launch the script in an interactive '
                                 'IPython console')
        parser.add_argument('--pdb', action='store_true',
                            help='activate the Python debugger')

    def create_main(self):
        import phy

        desc = sys.modules['phy'].__doc__
        self._parser = Parser(description=desc,
                              epilog=_examples,
                              formatter_class=CustomFormatter,
                              )
        self._parser.set_defaults(func=None)
        self._parser.add_argument('--version', '-v',
                                  action='version',
                                  version=phy.__version_git__,
                                  help='print the version of phy')
        self._add_options(self._parser)
        self._subparsers = self._parser.add_subparsers(dest='command',
                                                       title='subcommand',
                                                       )

    def create_download(self):
        desc = 'download a sample dataset'
        p = self._add_sub_parser('download', desc)
        p.add_argument('file', help='dataset filename')
        p.add_argument('--output-dir', '-o', help='output directory')
        p.add_argument('--base',
                       default='cortexlab',
                       choices=('cortexlab', 'github'),
                       help='data repository name: `cortexlab` or `github`',
                       )
        p.set_defaults(func=download)

    def create_describe(self):
        desc = 'describe a `.kwik` file'
        p = self._add_sub_parser('describe', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.set_defaults(func=describe)

    def create_traces(self):
        desc = 'show the traces of a raw data file'
        p = self._add_sub_parser('traces', desc)
        p.add_argument('file', help='path to a `.kwd` or `.dat` file')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.add_argument('--n-channels', '-n',
                       help='number of channels in the recording '
                            '(only required when using a flat binary file)')
        p.add_argument('--dtype',
                       help='NumPy data type '
                            '(only required when using a flat binary file)',
                       default='int16',
                       )
        p.add_argument('--sample-rate', '-s',
                       help='sample rate in Hz '
                            '(only required when using a flat binary file)')
        p.set_defaults(func=traces)

    def create_spikesort(self):
        desc = 'launch the whole spike sorting pipeline on a `.prm` file'
        p = self._add_sub_parser('spikesort', desc)
        p.add_argument('file', help='path to a `.prm` file')
        p.add_argument('--kwik-path', help='filename of the `.kwik` file '
                       'to create (by default, `"experiment_name".kwik`)')
        p.add_argument('--overwrite', action='store_true', default=False,
                       help='overwrite the `.kwik` file ')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.set_defaults(func=spikesort)

    def create_detect(self):
        desc = 'launch the spike detection algorithm on a `.prm` file'
        p = self._add_sub_parser('detect', desc)
        p.add_argument('file', help='path to a `.prm` file')
        p.add_argument('--kwik-path', help='filename of the `.kwik` file '
                       'to create (by default, `"experiment_name".kwik`)')
        p.add_argument('--overwrite', action='store_true', default=False,
                       help='overwrite the `.kwik` file ')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.set_defaults(func=detect)

    def create_auto(self):
        desc = 'launch the automatic clustering algorithm on a `.kwik` file'
        p = self._add_sub_parser('cluster-auto', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.set_defaults(func=cluster_auto)

    def create_manual(self):
        desc = 'launch the manual clustering GUI on a `.kwik` file'
        p = self._add_sub_parser('cluster-manual', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.add_argument('--cluster-ids', '-c',
                       help='list of clusters to select initially')
        p.add_argument('--no-store', action='store_true', default=False,
                       help='do not create the store (faster loading time, '
                            'slower GUI)')
        p.set_defaults(func=cluster_manual)

    def create_notebook(self):
        # TODO
        pass

    def parse(self, args):
        try:
            return self._parser.parse_args(args)
        except SystemExit as e:
            if e.code != 0:
                raise e


#------------------------------------------------------------------------------
# Subcommand functions
#------------------------------------------------------------------------------

def _get_kwik_path(args):
    kwik_path = args.file
    if not op.exists(kwik_path):
        raise IOError("The file `{}` doesn't exist.".format(kwik_path))
    return kwik_path


def _create_session(args, **kwargs):
    from phy.session import Session
    kwik_path = _get_kwik_path(args)
    session = Session(kwik_path, **kwargs)
    return session


def describe(args):
    from phy.io.kwik import KwikModel
    path = _get_kwik_path(args)
    model = KwikModel(path, clustering=args.clustering)
    return 'model.describe()', dict(model=model)


def download(args):
    from phy import download_sample_data
    download_sample_data(args.file,
                         output_dir=args.output_dir,
                         base=args.base,
                         )


def traces(args):
    from vispy.app import run
    from phy.plot.traces import TraceView
    from phy.io.h5 import open_h5
    from phy.io.traces import read_kwd, read_dat

    path = args.file
    if path.endswith('.kwd'):
        f = open_h5(args.file)
        traces = read_kwd(f)
    elif path.endswith(('.dat', '.bin')):
        if not args.n_channels:
            raise ValueError("Please specify `--n-channels`.")
        if not args.dtype:
            raise ValueError("Please specify `--dtype`.")
        if not args.sample_rate:
            raise ValueError("Please specify `--sample-rate`.")
        n_channels = int(args.n_channels)
        dtype = np.dtype(args.dtype)
        traces = read_dat(path, dtype=dtype, n_channels=n_channels)

    start, end = map(int, args.interval.split(','))
    sample_rate = float(args.sample_rate)
    start = int(sample_rate * start)
    end = int(sample_rate * end)

    c = TraceView(keys='interactive')
    c.visual.traces = .01 * traces[start:end, ...]
    c.show()
    run()

    return None, None


def detect(args):
    from phy.io import create_kwik

    assert args.file.endswith('.prm')
    kwik_path = args.kwik_path
    kwik_path = create_kwik(args.file,
                            overwrite=args.overwrite,
                            kwik_path=kwik_path)

    interval = args.interval
    if interval is not None:
        interval = list(map(float, interval.split(',')))

    # Create the session with the newly-created .kwik file.
    args.file = kwik_path
    session = _create_session(args, use_store=False)
    return ('session.detect(interval=interval)',
            dict(session=session, interval=interval))


def cluster_auto(args):
    from phy.utils._misc import _read_python
    from phy.session import Session

    assert args.file.endswith('.prm')

    params = _read_python(args.file)
    kwik_path = params['experiment_name'] + '.kwik'
    session = Session(kwik_path)

    ns = dict(session=session,
              clustering=args.clustering,
              )
    cmd = ('session.cluster(clustering=clustering)')
    return (cmd, ns)


def spikesort(args):
    from phy.io import create_kwik

    assert args.file.endswith('.prm')
    kwik_path = args.kwik_path
    kwik_path = create_kwik(args.file,
                            overwrite=args.overwrite,
                            kwik_path=kwik_path,
                            )

    # Create the session with the newly-created .kwik file.
    args.file = kwik_path
    session = _create_session(args, use_store=False)

    interval = args.interval
    if interval is not None:
        interval = list(map(float, interval.split(',')))

    ns = dict(session=session,
              interval=interval,
              n_s_clusters=100,  # TODO: better handling of KK parameters
              )
    cmd = ('session.detect(interval=interval); session.cluster();')
    return (cmd, ns)


def cluster_manual(args):
    session = _create_session(args,
                              clustering=args.clustering,
                              use_store=not(args.no_store),
                              )
    cluster_ids = (list(map(int, args.cluster_ids.split(',')))
                   if args.cluster_ids else None)

    session.model.describe()

    from phy.gui import start_qt_app
    start_qt_app()

    gui = session.show_gui(cluster_ids=cluster_ids, show=False)
    print("\nPress `ctrl+h` to see the list of keyboard shortcuts.\n")
    return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)


#------------------------------------------------------------------------------
# Main functions
#------------------------------------------------------------------------------

def main(args=None):
    p = ParserCreator()
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, string_types):
        args = args.split(' ')
    args = p.parse(args)
    if args is None:
        return

    if args.profiler or args.line_profiler:
        from phy.utils.testing import _enable_profiler, _profile
        prof = _enable_profiler(args.line_profiler)
    else:
        prof = None

    import phy
    if args.debug:
        phy.debug()

    # Hide the traceback.
    if args.hide_traceback:
        def exception_handler(exception_type, exception, traceback):
            print("{}: {}".format(exception_type.__name__, exception))

        sys.excepthook = exception_handler

    # Activate IPython debugger.
    if args.pdb:
        from IPython.core import ultratb
        sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                             color_scheme='Linux',
                                             call_pdb=1,
                                             )

    func = args.func
    if func is None:
        p.parser.print_help()
        return

    out = func(args)
    if not out:
        return
    cmd, ns = out
    if not cmd:
        return
    requires_qt = ns.pop('requires_qt', False)
    requires_vispy = ns.pop('requires_vispy', False)

    # Default variables in namespace.
    ns.update(phy=phy, path=args.file)
    if 'session' in ns:
        ns['model'] = ns['session'].model

    # Interactive mode with IPython.
    if args.ipython:
        print("\nStarting IPython...")
        from IPython import start_ipython
        args_ipy = ["-i", "-c='{}'".format(cmd)]
        if requires_qt or requires_vispy:
            # Activate Qt event loop integration with Qt.
            args_ipy += ["--gui=qt"]
        start_ipython(args_ipy, user_ns=ns)
    else:
        if not prof:
            exec_(cmd, {}, ns)
        else:
            _profile(prof, cmd, {}, ns)

        if requires_qt:
            # Launch the Qt app.
            from phy.gui import run_qt_app
            run_qt_app()
        elif requires_vispy:
            # Launch the VisPy Qt app.
            from vispy.app import use_app, run
            use_app('pyqt4')
            run()


#------------------------------------------------------------------------------
# Entry point
#------------------------------------------------------------------------------

if __name__ == '__main__':
    main()
normal
{ "blob_id": "539523f177e2c3c0e1fb0226d1fcd65463b68a0e", "index": 6576, "step-1": "<mask token>\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n<mask token>\n\n\nclass ParserCreator(object):\n\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.\n RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n<mask token>\n\n\nclass ParserCreator(object):\n\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 
'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.\n RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n<mask token>\n\n\nclass ParserCreator(object):\n\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc 
= 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\ndef _get_kwik_path(args):\n kwik_path = args.file\n if not op.exists(kwik_path):\n raise IOError(\"The file `{}` doesn't exist.\".format(kwik_path))\n return kwik_path\n\n\ndef _create_session(args, **kwargs):\n from phy.session import Session\n kwik_path = _get_kwik_path(args)\n session = Session(kwik_path, **kwargs)\n return session\n\n\ndef describe(args):\n from phy.io.kwik import KwikModel\n path = _get_kwik_path(args)\n model = KwikModel(path, clustering=args.clustering)\n return 'model.describe()', dict(model=model)\n\n\ndef download(args):\n from phy import download_sample_data\n download_sample_data(args.file, output_dir=args.output_dir, base=args.base)\n\n\ndef traces(args):\n from vispy.app import run\n from phy.plot.traces import TraceView\n from phy.io.h5 import open_h5\n from phy.io.traces import read_kwd, read_dat\n path = args.file\n if path.endswith('.kwd'):\n f = open_h5(args.file)\n traces = read_kwd(f)\n elif path.endswith(('.dat', '.bin')):\n if not args.n_channels:\n raise ValueError('Please specify `--n-channels`.')\n if not args.dtype:\n raise ValueError('Please specify `--dtype`.')\n if not args.sample_rate:\n raise ValueError('Please specify `--sample-rate`.')\n n_channels = int(args.n_channels)\n dtype = np.dtype(args.dtype)\n traces = read_dat(path, dtype=dtype, n_channels=n_channels)\n start, end = map(int, args.interval.split(','))\n sample_rate = float(args.sample_rate)\n start = int(sample_rate * start)\n end = int(sample_rate * end)\n c = TraceView(keys='interactive')\n c.visual.traces = 0.01 * traces[start:end, ...]\n c.show()\n run()\n return None, None\n\n\ndef detect(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n return 'session.detect(interval=interval)', dict(session=session,\n interval=interval)\n\n\ndef cluster_auto(args):\n from phy.utils._misc import _read_python\n from phy.session import Session\n assert args.file.endswith('.prm')\n params = _read_python(args.file)\n kwik_path = params['experiment_name'] + '.kwik'\n session = Session(kwik_path)\n ns = dict(session=session, clustering=args.clustering)\n cmd = 'session.cluster(clustering=clustering)'\n return 
cmd, ns\n\n\ndef spikesort(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n ns = dict(session=session, interval=interval, n_s_clusters=100)\n cmd = 'session.detect(interval=interval); session.cluster();'\n return cmd, ns\n\n\ndef cluster_manual(args):\n session = _create_session(args, clustering=args.clustering, use_store=\n not args.no_store)\n cluster_ids = list(map(int, args.cluster_ids.split(','))\n ) if args.cluster_ids else None\n session.model.describe()\n from phy.gui import start_qt_app\n start_qt_app()\n gui = session.show_gui(cluster_ids=cluster_ids, show=False)\n print('\\nPress `ctrl+h` to see the list of keyboard shortcuts.\\n')\n return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)\n\n\ndef main(args=None):\n p = ParserCreator()\n if args is None:\n args = sys.argv[1:]\n elif isinstance(args, string_types):\n args = args.split(' ')\n args = p.parse(args)\n if args is None:\n return\n if args.profiler or args.line_profiler:\n from phy.utils.testing import _enable_profiler, _profile\n prof = _enable_profiler(args.line_profiler)\n else:\n prof = None\n import phy\n if args.debug:\n phy.debug()\n if args.hide_traceback:\n\n def exception_handler(exception_type, exception, traceback):\n print('{}: {}'.format(exception_type.__name__, exception))\n sys.excepthook = exception_handler\n if args.pdb:\n from IPython.core import ultratb\n sys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme=\n 'Linux', call_pdb=1)\n func = args.func\n if func is None:\n p.parser.print_help()\n return\n out = func(args)\n if not out:\n return\n cmd, ns = out\n if not cmd:\n return\n requires_qt = ns.pop('requires_qt', False)\n requires_vispy = ns.pop('requires_vispy', False)\n ns.update(phy=phy, path=args.file)\n if 'session' in ns:\n ns['model'] = ns['session'].model\n if args.ipython:\n print('\\nStarting IPython...')\n from IPython import start_ipython\n args_ipy = ['-i', \"-c='{}'\".format(cmd)]\n if requires_qt or requires_vispy:\n args_ipy += ['--gui=qt']\n start_ipython(args_ipy, user_ns=ns)\n else:\n if not prof:\n exec_(cmd, {}, ns)\n else:\n _profile(prof, cmd, {}, ns)\n if requires_qt:\n from phy.gui import run_qt_app\n run_qt_app()\n elif requires_vispy:\n from vispy.app import use_app, run\n use_app('pyqt4')\n run()\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\n\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.\n RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n_examples = dedent(\n \"\"\"\n\nexamples:\n phy -v display the version of phy\n phy download hybrid_120sec.dat -o data/\n download a sample raw data file in `data/`\n phy describe my_file.kwik\n display information about a Kwik dataset\n phy spikesort my_params.prm\n run the whole suite (spike detection and clustering)\n phy detect my_params.prm\n run spike detection on a parameters file\n phy cluster-auto my_file.kwik\n run klustakwik on a dataset (after spike detection)\n phy cluster-manual my_file.kwik\n run the manual clustering GUI\n\n\"\"\"\n )\n\n\nclass ParserCreator(object):\n\n def 
__init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\ndef _get_kwik_path(args):\n kwik_path = args.file\n if not op.exists(kwik_path):\n raise IOError(\"The file `{}` doesn't exist.\".format(kwik_path))\n return kwik_path\n\n\ndef _create_session(args, **kwargs):\n from phy.session import Session\n kwik_path = _get_kwik_path(args)\n session = Session(kwik_path, **kwargs)\n return session\n\n\ndef describe(args):\n from phy.io.kwik import KwikModel\n path = _get_kwik_path(args)\n model = KwikModel(path, clustering=args.clustering)\n return 'model.describe()', dict(model=model)\n\n\ndef download(args):\n from phy import download_sample_data\n download_sample_data(args.file, output_dir=args.output_dir, base=args.base)\n\n\ndef traces(args):\n from vispy.app import run\n from phy.plot.traces import TraceView\n from phy.io.h5 import open_h5\n from phy.io.traces import read_kwd, read_dat\n path = args.file\n if path.endswith('.kwd'):\n f = open_h5(args.file)\n traces = read_kwd(f)\n elif path.endswith(('.dat', '.bin')):\n if not args.n_channels:\n raise ValueError('Please specify `--n-channels`.')\n if not args.dtype:\n raise ValueError('Please specify `--dtype`.')\n if not args.sample_rate:\n raise ValueError('Please specify `--sample-rate`.')\n n_channels = int(args.n_channels)\n dtype = np.dtype(args.dtype)\n traces = read_dat(path, dtype=dtype, n_channels=n_channels)\n start, end = map(int, args.interval.split(','))\n sample_rate = float(args.sample_rate)\n start = int(sample_rate * start)\n end = int(sample_rate * end)\n c = TraceView(keys='interactive')\n c.visual.traces = 0.01 * traces[start:end, ...]\n c.show()\n run()\n return None, None\n\n\ndef detect(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, 
interval.split(',')))\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n return 'session.detect(interval=interval)', dict(session=session,\n interval=interval)\n\n\ndef cluster_auto(args):\n from phy.utils._misc import _read_python\n from phy.session import Session\n assert args.file.endswith('.prm')\n params = _read_python(args.file)\n kwik_path = params['experiment_name'] + '.kwik'\n session = Session(kwik_path)\n ns = dict(session=session, clustering=args.clustering)\n cmd = 'session.cluster(clustering=clustering)'\n return cmd, ns\n\n\ndef spikesort(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n ns = dict(session=session, interval=interval, n_s_clusters=100)\n cmd = 'session.detect(interval=interval); session.cluster();'\n return cmd, ns\n\n\ndef cluster_manual(args):\n session = _create_session(args, clustering=args.clustering, use_store=\n not args.no_store)\n cluster_ids = list(map(int, args.cluster_ids.split(','))\n ) if args.cluster_ids else None\n session.model.describe()\n from phy.gui import start_qt_app\n start_qt_app()\n gui = session.show_gui(cluster_ids=cluster_ids, show=False)\n print('\\nPress `ctrl+h` to see the list of keyboard shortcuts.\\n')\n return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)\n\n\ndef main(args=None):\n p = ParserCreator()\n if args is None:\n args = sys.argv[1:]\n elif isinstance(args, string_types):\n args = args.split(' ')\n args = p.parse(args)\n if args is None:\n return\n if args.profiler or args.line_profiler:\n from phy.utils.testing import _enable_profiler, _profile\n prof = _enable_profiler(args.line_profiler)\n else:\n prof = None\n import phy\n if args.debug:\n phy.debug()\n if args.hide_traceback:\n\n def exception_handler(exception_type, exception, traceback):\n print('{}: {}'.format(exception_type.__name__, exception))\n sys.excepthook = exception_handler\n if args.pdb:\n from IPython.core import ultratb\n sys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme=\n 'Linux', call_pdb=1)\n func = args.func\n if func is None:\n p.parser.print_help()\n return\n out = func(args)\n if not out:\n return\n cmd, ns = out\n if not cmd:\n return\n requires_qt = ns.pop('requires_qt', False)\n requires_vispy = ns.pop('requires_vispy', False)\n ns.update(phy=phy, path=args.file)\n if 'session' in ns:\n ns['model'] = ns['session'].model\n if args.ipython:\n print('\\nStarting IPython...')\n from IPython import start_ipython\n args_ipy = ['-i', \"-c='{}'\".format(cmd)]\n if requires_qt or requires_vispy:\n args_ipy += ['--gui=qt']\n start_ipython(args_ipy, user_ns=ns)\n else:\n if not prof:\n exec_(cmd, {}, ns)\n else:\n _profile(prof, cmd, {}, ns)\n if requires_qt:\n from phy.gui import run_qt_app\n run_qt_app()\n elif requires_vispy:\n from vispy.app import use_app, run\n use_app('pyqt4')\n run()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\n\"\"\"phy main CLI tool.\n\nUsage:\n\n phy --help\n\n\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport sys\nimport os.path 
as op\nimport argparse\nfrom textwrap import dedent\n\nimport numpy as np\nfrom six import exec_, string_types\n\n\n#------------------------------------------------------------------------------\n# Parser utilities\n#------------------------------------------------------------------------------\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n_examples = dedent(\"\"\"\n\nexamples:\n phy -v display the version of phy\n phy download hybrid_120sec.dat -o data/\n download a sample raw data file in `data/`\n phy describe my_file.kwik\n display information about a Kwik dataset\n phy spikesort my_params.prm\n run the whole suite (spike detection and clustering)\n phy detect my_params.prm\n run spike detection on a parameters file\n phy cluster-auto my_file.kwik\n run klustakwik on a dataset (after spike detection)\n phy cluster-manual my_file.kwik\n run the manual clustering GUI\n\n\"\"\")\n\n\n#------------------------------------------------------------------------------\n# Parser creator\n#------------------------------------------------------------------------------\n\nclass ParserCreator(object):\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d',\n action='store_true',\n help='activate debug logging mode')\n\n parser.add_argument('--hide-traceback',\n action='store_true',\n help='hide the traceback for cleaner error '\n 'messages')\n\n parser.add_argument('--profiler', '-p',\n action='store_true',\n help='activate the profiler')\n\n parser.add_argument('--line-profiler', '-lp',\n dest='line_profiler',\n action='store_true',\n help='activate the line-profiler -- you '\n 'need to decorate the functions '\n 'to profile with `@profile` '\n 'in the code')\n\n parser.add_argument('--ipython', '-i', action='store_true',\n help='launch the script in an interactive '\n 'IPython console')\n\n parser.add_argument('--pdb', action='store_true',\n help='activate the Python debugger')\n\n def create_main(self):\n import phy\n\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc,\n epilog=_examples,\n formatter_class=CustomFormatter,\n )\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v',\n action='version',\n version=phy.__version_git__,\n help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand',\n )\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base',\n default='cortexlab',\n choices=('cortexlab', 'github'),\n help='data repository name: `cortexlab` or `github`',\n )\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = 
self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main',\n help='name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval',\n help='detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n',\n help='number of channels in the recording '\n '(only required when using a flat binary file)')\n p.add_argument('--dtype',\n help='NumPy data type '\n '(only required when using a flat binary file)',\n default='int16',\n )\n p.add_argument('--sample-rate', '-s',\n help='sample rate in Hz '\n '(only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help='filename of the `.kwik` file '\n 'to create (by default, `\"experiment_name\".kwik`)')\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval',\n help='detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help='filename of the `.kwik` file '\n 'to create (by default, `\"experiment_name\".kwik`)')\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval',\n help='detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main',\n help='name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main',\n help='name of the clustering to use')\n p.add_argument('--cluster-ids', '-c',\n help='list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, '\n 'slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n # TODO\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\n#------------------------------------------------------------------------------\n# Subcommand functions\n#------------------------------------------------------------------------------\n\ndef _get_kwik_path(args):\n kwik_path = args.file\n\n if not op.exists(kwik_path):\n raise IOError(\"The file `{}` doesn't exist.\".format(kwik_path))\n\n return kwik_path\n\n\ndef _create_session(args, **kwargs):\n from phy.session import Session\n kwik_path = _get_kwik_path(args)\n session = Session(kwik_path, **kwargs)\n return session\n\n\ndef describe(args):\n from phy.io.kwik import KwikModel\n path = _get_kwik_path(args)\n model = KwikModel(path, clustering=args.clustering)\n return 'model.describe()', dict(model=model)\n\n\ndef download(args):\n from phy import download_sample_data\n download_sample_data(args.file,\n output_dir=args.output_dir,\n base=args.base,\n )\n\n\ndef traces(args):\n from vispy.app import run\n from phy.plot.traces import TraceView\n from phy.io.h5 import open_h5\n from phy.io.traces import read_kwd, read_dat\n\n path = args.file\n if path.endswith('.kwd'):\n f = open_h5(args.file)\n traces = read_kwd(f)\n elif path.endswith(('.dat', '.bin')):\n if not args.n_channels:\n raise ValueError(\"Please specify `--n-channels`.\")\n if not args.dtype:\n raise ValueError(\"Please specify `--dtype`.\")\n if not args.sample_rate:\n raise ValueError(\"Please specify `--sample-rate`.\")\n n_channels = int(args.n_channels)\n dtype = np.dtype(args.dtype)\n traces = read_dat(path, dtype=dtype, n_channels=n_channels)\n\n start, end = map(int, args.interval.split(','))\n sample_rate = float(args.sample_rate)\n start = int(sample_rate * start)\n end = int(sample_rate * end)\n\n c = TraceView(keys='interactive')\n c.visual.traces = .01 * traces[start:end, ...]\n c.show()\n run()\n\n return None, None\n\n\ndef detect(args):\n from phy.io import create_kwik\n\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file,\n overwrite=args.overwrite,\n kwik_path=kwik_path)\n\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n\n # Create the session with the newly-created .kwik file.\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n return ('session.detect(interval=interval)',\n dict(session=session, interval=interval))\n\n\ndef cluster_auto(args):\n from phy.utils._misc import _read_python\n from 
phy.session import Session\n\n assert args.file.endswith('.prm')\n\n params = _read_python(args.file)\n kwik_path = params['experiment_name'] + '.kwik'\n session = Session(kwik_path)\n\n ns = dict(session=session,\n clustering=args.clustering,\n )\n cmd = ('session.cluster(clustering=clustering)')\n return (cmd, ns)\n\n\ndef spikesort(args):\n from phy.io import create_kwik\n\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file,\n overwrite=args.overwrite,\n kwik_path=kwik_path,\n )\n # Create the session with the newly-created .kwik file.\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n\n ns = dict(session=session,\n interval=interval,\n n_s_clusters=100, # TODO: better handling of KK parameters\n )\n cmd = ('session.detect(interval=interval); session.cluster();')\n return (cmd, ns)\n\n\ndef cluster_manual(args):\n session = _create_session(args,\n clustering=args.clustering,\n use_store=not(args.no_store),\n )\n cluster_ids = (list(map(int, args.cluster_ids.split(',')))\n if args.cluster_ids else None)\n\n session.model.describe()\n\n from phy.gui import start_qt_app\n start_qt_app()\n\n gui = session.show_gui(cluster_ids=cluster_ids, show=False)\n print(\"\\nPress `ctrl+h` to see the list of keyboard shortcuts.\\n\")\n return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)\n\n\n#------------------------------------------------------------------------------\n# Main functions\n#------------------------------------------------------------------------------\n\ndef main(args=None):\n p = ParserCreator()\n if args is None:\n args = sys.argv[1:]\n elif isinstance(args, string_types):\n args = args.split(' ')\n args = p.parse(args)\n if args is None:\n return\n\n if args.profiler or args.line_profiler:\n from phy.utils.testing import _enable_profiler, _profile\n prof = _enable_profiler(args.line_profiler)\n else:\n prof = None\n\n import phy\n if args.debug:\n phy.debug()\n\n # Hide the traceback.\n if args.hide_traceback:\n def exception_handler(exception_type, exception, traceback):\n print(\"{}: {}\".format(exception_type.__name__, exception))\n\n sys.excepthook = exception_handler\n\n # Activate IPython debugger.\n if args.pdb:\n from IPython.core import ultratb\n sys.excepthook = ultratb.FormattedTB(mode='Verbose',\n color_scheme='Linux',\n call_pdb=1,\n )\n\n func = args.func\n if func is None:\n p.parser.print_help()\n return\n\n out = func(args)\n if not out:\n return\n cmd, ns = out\n if not cmd:\n return\n requires_qt = ns.pop('requires_qt', False)\n requires_vispy = ns.pop('requires_vispy', False)\n\n # Default variables in namespace.\n ns.update(phy=phy, path=args.file)\n if 'session' in ns:\n ns['model'] = ns['session'].model\n\n # Interactive mode with IPython.\n if args.ipython:\n print(\"\\nStarting IPython...\")\n from IPython import start_ipython\n args_ipy = [\"-i\", \"-c='{}'\".format(cmd)]\n if requires_qt or requires_vispy:\n # Activate Qt event loop integration with Qt.\n args_ipy += [\"--gui=qt\"]\n start_ipython(args_ipy, user_ns=ns)\n else:\n if not prof:\n exec_(cmd, {}, ns)\n else:\n _profile(prof, cmd, {}, ns)\n\n if requires_qt:\n # Launch the Qt app.\n from phy.gui import run_qt_app\n run_qt_app()\n elif requires_vispy:\n # Launch the VisPy Qt app.\n from vispy.app import use_app, run\n use_app('pyqt4')\n 
run()\n\n\n#------------------------------------------------------------------------------\n# Entry point\n#------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 17, 18, 29, 30, 32 ] }
[ 17, 18, 29, 30, 32 ]
variable_1 = 100
variable_2 = 500
variable_3 = 222.5
variable_4 = 'Hello'
variable_5 = 'world'
print(variable_1, variable_2, variable_3, sep=', ')
print(variable_4, variable_5, sep=', ', end='!\n')
user_age = input('Введите ваш возраст: ')
user_name = input('Введите ваше имя: ')
print(variable_4 + ', ' + user_name + '! ' + 'Ваш возраст: ' + user_age)
normal
{ "blob_id": "12ca9a81574d34d1004ac9ebcb2ee4b31d7171e2", "index": 5623, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(variable_1, variable_2, variable_3, sep=', ')\nprint(variable_4, variable_5, sep=', ', end='!\\n')\n<mask token>\nprint(variable_4 + ', ' + user_name + '! ' + 'Ваш возраст: ' + user_age)\n", "step-3": "variable_1 = 100\nvariable_2 = 500\nvariable_3 = 222.5\nvariable_4 = 'Hello'\nvariable_5 = 'world'\nprint(variable_1, variable_2, variable_3, sep=', ')\nprint(variable_4, variable_5, sep=', ', end='!\\n')\nuser_age = input('Введите ваш возраст: ')\nuser_name = input('Введите ваше имя: ')\nprint(variable_4 + ', ' + user_name + '! ' + 'Ваш возраст: ' + user_age)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# -*- coding: utf-8 -*-
#
# Akamatsu CMS
# https://github.com/rmed/akamatsu
#
# MIT License
#
# Copyright (c) 2020 Rafael Medina García <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""This module contains user profile views."""

from flask import current_app, flash, redirect, render_template, url_for
from flask_babel import _
from flask_login import current_user, fresh_login_required, login_required
from sqlalchemy.exc import IntegrityError

from akamatsu import crypto_manager, db
from akamatsu.views.admin import bp_admin
from akamatsu.forms import PasswordResetForm, ProfileForm


@bp_admin.route('/profile', methods=['GET', 'POST'])
@login_required
def profile_edit():
    """Show user profile edition form."""
    form = ProfileForm(obj=current_user)

    if form.validate_on_submit():
        form.populate_obj(current_user)

        try:
            correct = True
            db.session.commit()

            flash(_('Profile updated correctly'), 'success')

            return render_template('admin/profile/edit.html', form=form)

        except IntegrityError:
            # Email already exists
            correct = False
            form.errors.email.append(_('Email is already registered'))

            return render_template('admin/profile/edit.html', form=form)

        except Exception:
            # Catch anything unknown
            correct = False

            flash(_('Failed to update profile, contact an administrator'), 'error')

            return render_template('admin/profile/edit.html', form=form)

        finally:
            if not correct:
                db.session.rollback()

    return render_template('admin/profile/edit.html', form=form)


@bp_admin.route('/profile/change-password', methods=['GET', 'POST'])
@fresh_login_required
def change_password():
    """Show form to update user password.

    Requires confirming current password.
    """
    form = PasswordResetForm()

    if form.validate_on_submit():
        # Update user
        current_user.password = crypto_manager.hash(form.password.data)

        try:
            correct = True
            db.session.commit()

            flash(_('Password updated correctly'), 'success')

            return redirect(url_for('admin.profile_edit'))

        except Exception:
            correct = False
            current_app.logger.exception('Failed to update user password')

            flash(_('Error updating password, contact an administrator'), 'error')

            return render_template('admin/profile/change_password.html', form=form)

        finally:
            if not correct:
                db.session.rollback()

    return render_template('admin/profile/change_password.html', form=form)
normal
{ "blob_id": "cde62c5032109bb22aa81d813e30097dad80a9c3", "index": 4924, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@bp_admin.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile_edit():\n \"\"\"Show user profile edition form.\"\"\"\n form = ProfileForm(obj=current_user)\n if form.validate_on_submit():\n form.populate_obj(current_user)\n try:\n correct = True\n db.session.commit()\n flash(_('Profile updated correctly'), 'success')\n return render_template('admin/profile/edit.html', form=form)\n except IntegrityError:\n correct = False\n form.errors.email.append(_('Email is already registered'))\n return render_template('admin/profile/edit.html', form=form)\n except Exception:\n correct = False\n flash(_('Failed to update profile, contact an administrator'),\n 'error')\n return render_template('admin/profile/edit.html', form=form)\n finally:\n if not correct:\n db.session.rollback()\n return render_template('admin/profile/edit.html', form=form)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@bp_admin.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile_edit():\n \"\"\"Show user profile edition form.\"\"\"\n form = ProfileForm(obj=current_user)\n if form.validate_on_submit():\n form.populate_obj(current_user)\n try:\n correct = True\n db.session.commit()\n flash(_('Profile updated correctly'), 'success')\n return render_template('admin/profile/edit.html', form=form)\n except IntegrityError:\n correct = False\n form.errors.email.append(_('Email is already registered'))\n return render_template('admin/profile/edit.html', form=form)\n except Exception:\n correct = False\n flash(_('Failed to update profile, contact an administrator'),\n 'error')\n return render_template('admin/profile/edit.html', form=form)\n finally:\n if not correct:\n db.session.rollback()\n return render_template('admin/profile/edit.html', form=form)\n\n\n@bp_admin.route('/profile/change-password', methods=['GET', 'POST'])\n@fresh_login_required\ndef change_password():\n \"\"\"Show form to update user password.\n\n Requires confirming current password.\n \"\"\"\n form = PasswordResetForm()\n if form.validate_on_submit():\n current_user.password = crypto_manager.hash(form.password.data)\n try:\n correct = True\n db.session.commit()\n flash(_('Password updated correctly'), 'success')\n return redirect(url_for('admin.profile_edit'))\n except Exception:\n correct = False\n current_app.logger.exception('Failed to update user password')\n flash(_('Error updating password, contact an administrator'),\n 'error')\n return render_template('admin/profile/change_password.html',\n form=form)\n finally:\n if not correct:\n db.session.rollback()\n return render_template('admin/profile/change_password.html', form=form)\n", "step-4": "<mask token>\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask_babel import _\nfrom flask_login import current_user, fresh_login_required, login_required\nfrom sqlalchemy.exc import IntegrityError\nfrom akamatsu import crypto_manager, db\nfrom akamatsu.views.admin import bp_admin\nfrom akamatsu.forms import PasswordResetForm, ProfileForm\n\n\n@bp_admin.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile_edit():\n \"\"\"Show user profile edition form.\"\"\"\n form = ProfileForm(obj=current_user)\n if form.validate_on_submit():\n form.populate_obj(current_user)\n try:\n correct = True\n db.session.commit()\n flash(_('Profile updated correctly'), 'success')\n return 
render_template('admin/profile/edit.html', form=form)\n except IntegrityError:\n correct = False\n form.errors.email.append(_('Email is already registered'))\n return render_template('admin/profile/edit.html', form=form)\n except Exception:\n correct = False\n flash(_('Failed to update profile, contact an administrator'),\n 'error')\n return render_template('admin/profile/edit.html', form=form)\n finally:\n if not correct:\n db.session.rollback()\n return render_template('admin/profile/edit.html', form=form)\n\n\n@bp_admin.route('/profile/change-password', methods=['GET', 'POST'])\n@fresh_login_required\ndef change_password():\n \"\"\"Show form to update user password.\n\n Requires confirming current password.\n \"\"\"\n form = PasswordResetForm()\n if form.validate_on_submit():\n current_user.password = crypto_manager.hash(form.password.data)\n try:\n correct = True\n db.session.commit()\n flash(_('Password updated correctly'), 'success')\n return redirect(url_for('admin.profile_edit'))\n except Exception:\n correct = False\n current_app.logger.exception('Failed to update user password')\n flash(_('Error updating password, contact an administrator'),\n 'error')\n return render_template('admin/profile/change_password.html',\n form=form)\n finally:\n if not correct:\n db.session.rollback()\n return render_template('admin/profile/change_password.html', form=form)\n", "step-5": "# -*- coding: utf-8 -*-\n#\n# Akamatsu CMS\n# https://github.com/rmed/akamatsu\n#\n# MIT License\n#\n# Copyright (c) 2020 Rafael Medina García <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"This module contains user profile views.\"\"\"\n\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask_babel import _\nfrom flask_login import current_user, fresh_login_required, login_required\nfrom sqlalchemy.exc import IntegrityError\n\nfrom akamatsu import crypto_manager, db\nfrom akamatsu.views.admin import bp_admin\nfrom akamatsu.forms import PasswordResetForm, ProfileForm\n\n\n@bp_admin.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile_edit():\n \"\"\"Show user profile edition form.\"\"\"\n form = ProfileForm(obj=current_user)\n\n if form.validate_on_submit():\n form.populate_obj(current_user)\n\n try:\n correct = True\n db.session.commit()\n\n flash(_('Profile updated correctly'), 'success')\n\n return render_template('admin/profile/edit.html', form=form)\n\n except IntegrityError:\n # Email already exists\n correct = False\n form.errors.email.append(_('Email is already registered'))\n\n return render_template('admin/profile/edit.html', form=form)\n\n except Exception:\n # Catch anything unknown\n correct = False\n\n flash(_('Failed to update profile, contact an administrator'), 'error')\n\n return render_template('admin/profile/edit.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/edit.html', form=form)\n\n\n@bp_admin.route('/profile/change-password', methods=['GET', 'POST'])\n@fresh_login_required\ndef change_password():\n \"\"\"Show form to update user password.\n\n Requires confirming current password.\n \"\"\"\n form = PasswordResetForm()\n\n if form.validate_on_submit():\n # Update user\n current_user.password = crypto_manager.hash(form.password.data)\n\n try:\n correct = True\n db.session.commit()\n\n flash(_('Password updated correctly'), 'success')\n\n return redirect(url_for('admin.profile_edit'))\n\n except Exception:\n correct = False\n current_app.logger.exception('Failed to update user password')\n\n flash(_('Error updating password, contact an administrator'), 'error')\n\n return render_template('admin/profile/change_password.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/change_password.html', form=form)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python3

'''Глава 9. Распутываем Всемирную паутину'''

'''1. Если вы еще не установили Flask, сделайте это сейчас.
Это также установит werkzeug, jinja2 и, возможно, другие пакеты.'''

# pip3 install flask

print('\n================================ RESTART ================================\n')

'''2. Создайте скелет сайта с помощью веб-сервера Flask.
Убедитесь, что сервер начинает свою работу по адресу Localhost на стандартном порте 5000.
Если ваш компьютер уже использует порт 5000 для чего-то еще, воспользуйтесь другим портом.'''

'''from flask import Flask
app = Flask(__name__)

@app.route("/")
def hello():
    return "Hello World!"

if __name__ == "__main__":
    app.run(port=5000, debug=True)'''

print('\n================================ RESTART ================================\n')

'''3. Добавьте функцию home() для обработки запросов к домашней странице. Пусть она возвращает строку It's alive!.'''

'''from flask import Flask
app = Flask(__name__)

@app.route("/")
def home():
    return "It's alive!"

if __name__ == "__main__":
    app.run(debug=True)'''

print('\n================================ RESTART ================================\n')

'''4. Создайте шаблон для jinja2, который называется home1.html и содержит следующий контент:
<html>
<head>
<title>It's alive!</title>
<body>
I'm of course referring to {{thing}}, which is {{height}} feet tall and {{color}}.
</body>
</html>'''

print('\n================================ RESTART ================================\n')

'''5. Модифицируйте функцию home() вашего сервера, чтобы она использовала шаблон home1.html.
Передайте ей три параметра для команды GET: thing, height и color.'''

'''Перейдите в своем клиенте по следующему адресу:
http://localhost:5000/?thing=Octothorpe&height=7&color=green'''

from flask import Flask, render_template, request
app = Flask(__name__)

@app.route('/')
def home():
    thing = request.args.get('thing')
    height = request.args.get('height')
    color = request.args.get('color')
    return render_template('home1.html', thing=thing, height=height, color=color)

if __name__ == "__main__":
    app.run(debug=True)
normal
{ "blob_id": "664f9d5aa981c3590043fae1d0c80441bda4fbb1", "index": 2499, "step-1": "<mask token>\n\n\[email protected]('/')\ndef home():\n thing = request.args.get('thing')\n height = request.args.get('height')\n color = request.args.get('color')\n return render_template('home1.html', thing=thing, height=height, color=\n color)\n\n\n<mask token>\n", "step-2": "<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\n\n\[email protected]('/')\ndef home():\n thing = request.args.get('thing')\n height = request.args.get('height')\n color = request.args.get('color')\n return render_template('home1.html', thing=thing, height=height, color=\n color)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n thing = request.args.get('thing')\n height = request.args.get('height')\n color = request.args.get('color')\n return render_template('home1.html', thing=thing, height=height, color=\n color)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nprint(\n \"\"\"\n================================ RESTART ================================\n\"\"\"\n )\n<mask token>\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n thing = request.args.get('thing')\n height = request.args.get('height')\n color = request.args.get('color')\n return render_template('home1.html', thing=thing, height=height, color=\n color)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "#!/usr/bin/env python3\n\n'''Глава 9. Распутываем Всемирную паутину'''\n\n'''1. Если вы еще не установили Flask, сделайте это сейчас.\nЭто также установит werkzeug, jinja2 и, возможно, другие пакеты.'''\n\n# pip3 install flask\n\nprint('\\n================================ RESTART ================================\\n')\n\n'''2. Создайте скелет сайта с помощью веб-сервера Flask. \nУбедитесь, что сервер начинает свою работу по адресу Localhost на стандартном порте 5000. 
\nЕсли ваш компьютер уже использует порт 5000 для чего-то еще, воспользуйтесь другим портом.'''\n\n'''from flask import Flask\napp = Flask(__name__)\n\[email protected](\"/\")\ndef hello():\n return \"Hello World!\"\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True)'''\n\nprint('\\n================================ RESTART ================================\\n')\n\n'''3. Добавьте функцию home() для обработки запросов к домашней странице. Пусть она возвращает строку It's alive!.'''\n\n'''from flask import Flask\napp = Flask(__name__)\n\[email protected](\"/\")\ndef home():\n return \"It's alive!\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)'''\n\nprint('\\n================================ RESTART ================================\\n')\n\n'''4. Создайте шаблон для jinja2, который называется home1.html и содержит следующий контент:\n<html>\n<head>\n<title>It's alive!</title>\n<body>\nI'm of course referring to {{thing}}, which is {{height}} feet tall and {{color}}.\n</body>\n</html>'''\n\nprint('\\n================================ RESTART ================================\\n')\n\n'''5. Модифицируйте функцию home() вашего сервера, чтобы она использовала шаблон home1.html. \nПередайте ей три параметра для команды GET: thing, height и color.'''\n\n'''Перейдите в своем клиенте по следующему адресу:\nhttp://localhost:5000/?thing=Octothorpe&height=7&color=green'''\n\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\[email protected]('/')\ndef home():\n thing = request.args.get('thing')\n height = request.args.get('height')\n color = request.args.get('color')\n return render_template('home1.html', thing=thing, height=height, color=color)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
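A note on the exercise-5 solution above: request.args.get() returns None for any query parameter that is missing, so visiting http://localhost:5000/ without the three parameters renders the string "None" into home1.html. A minimal hedged variant with fallbacks (the defaults below simply reuse the values from the sample URL and are not part of the original exercise):

from flask import Flask, render_template, request

app = Flask(__name__)

@app.route('/')
def home():
    # Fall back to the sample values when a query parameter is absent.
    thing = request.args.get('thing', 'Octothorpe')
    height = request.args.get('height', '7')
    color = request.args.get('color', 'green')
    return render_template('home1.html', thing=thing, height=height, color=color)

if __name__ == '__main__':
    app.run(debug=True)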
import numpy as np
import imutils
import cv2

image = cv2.imread("D:\\Github\\python-opencv\\images\\trex.png")
cv2.imshow("Original", image)
cv2.waitKey(0)

(h, w) = image.shape[:2] # get height and width of the image
center = (w/2, h/2) # which point to rotate around

M = cv2.getRotationMatrix2D(center, 45, 1.0) # rotation matrix
rotated = cv2.warpAffine(image, M, (w, h)) # apply the rotation
cv2.imshow("Rotated by 45 degrees", rotated)
cv2.waitKey(0)

M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("Rotated by -90 degrees", rotated)
cv2.waitKey(0)

rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180", rotated)
cv2.waitKey(0)
normal
{ "blob_id": "4462fec6e0edc25530c93ffeeae2372c86fef2cc", "index": 528, "step-1": "<mask token>\n", "step-2": "<mask token>\ncv2.imshow('Original', image)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n", "step-3": "<mask token>\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n", "step-4": "import numpy as np\nimport imutils\nimport cv2\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n", "step-5": "import numpy as np\nimport imutils\nimport cv2\n\nimage = cv2.imread(\"D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png\")\ncv2.imshow(\"Original\", image)\ncv2.waitKey(0)\n\n(h, w) = image.shape[:2] # get height and width of the image\ncenter = (w/2, h/2) # which point to rotate around\n\nM = cv2.getRotationMatrix2D(center, 45, 1.0) # rotation matrix\nrotated = cv2.warpAffine(image, M, (w, h)) # apply the rotation\ncv2. imshow(\"Rotated by 45 degrees\", rotated)\ncv2.waitKey(0)\n\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow(\"Rotated by -90 degrees\", rotated)\ncv2.waitKey(0)\n\nrotated = imutils.rotate(image, 180)\ncv2.imshow(\"Rotated by 180\", rotated)\ncv2.waitKey(0)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
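The rotations above reuse the original canvas size, so the corners of trex.png are clipped at 45 degrees. If the whole rotated image is needed, imutils also ships rotate_bound, which grows the output canvas to fit — a short sketch assuming the same input path as above:

import cv2
import imutils

image = cv2.imread("D:\\Github\\python-opencv\\images\\trex.png")

# rotate_bound enlarges the canvas so no pixels are cut off at the corners
rotated_full = imutils.rotate_bound(image, 45)
cv2.imshow("Rotated by 45 degrees, unclipped", rotated_full)
cv2.waitKey(0)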
from . import by_trips from . import by_slope
normal
{ "blob_id": "74fae3636b1c1b0b79d0c6bec8698581b063eb9c", "index": 8944, "step-1": "<mask token>\n", "step-2": "from . import by_trips\nfrom . import by_slope\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
''' Write the necessary code to display the area and perimeter of a rectangle that has a width of 2.4 and a height of 6.4. ''' x, y = 2.4, 6.4 perimeter = (x*2)+(y*2) area = x*y print("Perimeter is "+str(perimeter) + ", Area is " + str(area))
normal
{ "blob_id": "a7de079866d7ac80260b438043cf0403f598cebc", "index": 5091, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('Perimeter is ' + str(perimeter) + ', Area is ' + str(area))\n", "step-3": "<mask token>\nx, y = 2.4, 6.4\nperimeter = x * 2 + y * 2\narea = x * y\nprint('Perimeter is ' + str(perimeter) + ', Area is ' + str(area))\n", "step-4": "'''\n\nWrite the necessary code to display the area and perimeter of a rectangle that has a width of 2.4 and a height of 6.4.\n\n'''\nx, y = 2.4, 6.4\nperimeter = (x*2)+(y*2)\narea = x*y\nprint(\"Perimeter is \"+str(perimeter) + \", Area is \" + str(area))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
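Worked by hand, the expected output of the snippet above is perimeter = 2 * (2.4 + 6.4) = 17.6 and area = 2.4 * 6.4 = 15.36; a quick check (not part of the original exercise) confirms it:

x, y = 2.4, 6.4
perimeter = (x * 2) + (y * 2)
area = x * y

# Round before comparing to avoid floating-point noise.
assert round(perimeter, 2) == 17.6
assert round(area, 2) == 15.36
print("Perimeter is " + str(perimeter) + ", Area is " + str(area))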
import pyximport pyximport.install(build_in_temp=False,inplace=True) import Cython.Compiler.Options Cython.Compiler.Options.annotate = True import numpy as np from test1 import c_test,c_test_result_workaround a = np.ascontiguousarray(np.array([ [1,2,3],[1,2,3],[1,2,3] ], dtype=np.long), dtype=np.long) print '\nStart Value:\n',a a_transposed = a.T ai = a_transposed[0] i = ai[0] j = ai[1] k = ai[2] print '\nExpected Value:\n',[i,j,k] b = np.ascontiguousarray(np.empty((3,), dtype=np.long,order='C')) x = c_test(a,b) print '\nProblem Result:\n',np.asarray(x) y = c_test_result_workaround(a,b) print '\nWork-Around Result:\n',np.asarray(y)
normal
{ "blob_id": "0276181055f2c70562c1f557a16d00ba7107d003", "index": 1219, "step-1": "\n\nimport pyximport\npyximport.install(build_in_temp=False,inplace=True)\nimport Cython.Compiler.Options\nCython.Compiler.Options.annotate = True\nimport numpy as np\nfrom test1 import c_test,c_test_result_workaround\n\na = np.ascontiguousarray(np.array([ [1,2,3],[1,2,3],[1,2,3] ], dtype=np.long), dtype=np.long)\nprint '\\nStart Value:\\n',a\n\na_transposed = a.T\nai = a_transposed[0]\ni = ai[0]\nj = ai[1]\nk = ai[2]\nprint '\\nExpected Value:\\n',[i,j,k]\n\nb = np.ascontiguousarray(np.empty((3,), dtype=np.long,order='C'))\nx = c_test(a,b)\nprint '\\nProblem Result:\\n',np.asarray(x)\n\n\ny = c_test_result_workaround(a,b)\nprint '\\nWork-Around Result:\\n',np.asarray(y)", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import torch.nn as nn import torch from torch.distributions.categorical import Categorical import torch.nn.functional as F from torch.optim import Adam import gym import numpy as np Device = torch.device("cuda:0") class ActorCriticNet(nn.Module): def __init__(self, observation_space, action_space, hidden_sizes=[32,32], activation=nn.Tanh): super().__init__() obs_dim = observation_space.shape[0] action_dim = action_space.n self.base_net = nn.Sequential( nn.Linear(obs_dim, hidden_sizes[0]), # nn.Linear(hidden_sizes[0], hidden_sizes[1]), ) self.pi = nn.Linear(hidden_sizes[1], action_dim) self.vf = nn.Linear(hidden_sizes[1],1) self.to(Device) def forward(self, obs): obs = torch.Tensor(obs).to(Device) x = F.relu(self.base_net(obs)) action_logits = F.softmax(self.pi(x), dim=-1) value = self.vf(x) return action_logits, value class Agent(object): def __init__(self, model=None, lr=1e-2, gamma=0.99): self.gamma = gamma self.AC = model self.optimizer = Adam(AC.parameters(), lr=lr) self.logp_as = [] self.values = [] self.rewards = [] def choose_action(self, obs): action_logits, value = self.AC(obs) distribution = Categorical(action_logits) action = distribution.sample() self.logp_as.append(distribution.log_prob(action)) self.values.append(value) return action.item() def learn(self): R = 0 policy_losses = [] value_losses = [] returns = [] for r in self.rewards[::-1]: R = r + self.gamma * R returns.insert(0, R) returns = torch.tensor(returns).to(Device) returns = (returns - returns.mean()) / (returns.std() + 0.00001) for logp_a, value, R in zip(self.logp_as, self.values, returns): advantage = R - value.item() # calculate actor (policy) loss policy_losses.append(-logp_a * advantage) # calculate critic (value) loss using L1 smooth loss value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).to(Device))) self.optimizer.zero_grad() loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum() loss.backward(retain_graph=True) self.optimizer.step() self.rewards = [] self.values = [] self.logp_as = [] # Build env env = gym.make('CartPole-v1') state = env.reset() # Learning setting lr = 3e-2 EPISODES=30000 GAMMA = 0.99 hidden_sizes = [128,128] show_every = 100 AC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes) agent = Agent(AC, lr=lr, gamma=GAMMA) for episode in range(EPISODES): # For every episode init done = False obs = env.reset() I = 1 T = 0 # Logs episode_reward = 0 running_reward = 0 if episode % show_every == 0: is_render = True else: is_render = False while not done: # Render if is_render: env.render("human") # Predict action and value action = agent.choose_action(obs) # Step the env next_obs, reward, done, _ = env.step(action) # Update obs obs = next_obs agent.rewards.append(reward) T += 1 # Logs episode_reward += reward # Learn once agent.learn() # Update cumulative reward running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward print(f"episode_{episode} \t ep_reward = {episode_reward} \t ep_len = {T}") if running_reward > env.spec.reward_threshold: print("Solved! Running reward is now {} and " "the last episode runs to {} time steps!".format(running_reward, T)) break
normal
{ "blob_id": "e1ab4b034c949b8158c6ccc1e8e3f4a960a38c72", "index": 4382, "step-1": "<mask token>\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], 
action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\nfor episode in range(EPISODES):\n done = False\n obs = env.reset()\n I = 1\n T = 0\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n while not done:\n if is_render:\n env.render('human')\n action = agent.choose_action(obs)\n next_obs, reward, done, _ = env.step(action)\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n episode_reward += reward\n agent.learn()\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n print(f'episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}')\n if running_reward > env.spec.reward_threshold:\n print(\n 'Solved! 
Running reward is now {} and the last episode runs to {} time steps!'\n .format(running_reward, T))\n break\n", "step-4": "import torch.nn as nn\nimport torch\nfrom torch.distributions.categorical import Categorical\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nimport gym\nimport numpy as np\nDevice = torch.device('cuda:0')\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\nenv = gym.make('CartPole-v1')\nstate = env.reset()\nlr = 0.03\nEPISODES = 30000\nGAMMA = 0.99\nhidden_sizes = [128, 128]\nshow_every = 100\nAC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)\nagent = Agent(AC, lr=lr, gamma=GAMMA)\nfor episode in range(EPISODES):\n done = False\n obs = env.reset()\n I = 1\n T = 0\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n while not done:\n if is_render:\n env.render('human')\n action = agent.choose_action(obs)\n next_obs, reward, done, _ = env.step(action)\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n episode_reward += reward\n agent.learn()\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n print(f'episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}')\n if running_reward > env.spec.reward_threshold:\n print(\n 'Solved! 
Running reward is now {} and the last episode runs to {} time steps!'\n .format(running_reward, T))\n break\n", "step-5": "import torch.nn as nn\nimport torch\nfrom torch.distributions.categorical import Categorical\nimport torch.nn.functional as F\nfrom torch.optim import Adam\n\nimport gym\nimport numpy as np\n\nDevice = torch.device(\"cuda:0\")\n\nclass ActorCriticNet(nn.Module):\n def __init__(self, observation_space, action_space,\n hidden_sizes=[32,32], activation=nn.Tanh):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(\n nn.Linear(obs_dim, hidden_sizes[0]),\n # nn.Linear(hidden_sizes[0], hidden_sizes[1]),\n )\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1],1)\n self.to(Device)\n \n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\nclass Agent(object):\n def __init__(self, model=None, lr=1e-2, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n \n def learn(self):\n\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 0.00001)\n\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n # calculate actor (policy) loss \n policy_losses.append(-logp_a * advantage)\n # calculate critic (value) loss using L1 smooth loss\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).to(Device)))\n\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n\n self.rewards = []\n self.values = []\n self.logp_as = []\n \n\n# Build env\nenv = gym.make('CartPole-v1')\nstate = env.reset()\n\n# Learning setting\nlr = 3e-2\nEPISODES=30000\nGAMMA = 0.99\nhidden_sizes = [128,128]\nshow_every = 100\n\nAC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)\nagent = Agent(AC, lr=lr, gamma=GAMMA)\n\nfor episode in range(EPISODES):\n # For every episode init\n done = False\n obs = env.reset()\n I = 1\n T = 0\n\n # Logs\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n\n while not done:\n # Render\n if is_render:\n env.render(\"human\")\n \n # Predict action and value\n action = agent.choose_action(obs)\n\n # Step the env\n next_obs, reward, done, _ = env.step(action)\n\n # Update obs\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n\n # Logs\n episode_reward += reward\n \n # Learn once\n agent.learn()\n\n # Update cumulative reward\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n \n print(f\"episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}\")\n if running_reward > env.spec.reward_threshold:\n print(\"Solved! 
Running reward is now {} and \"\n \"the last episode runs to {} time steps!\".format(running_reward, T))\n break\n", "step-ids": [ 4, 7, 8, 10, 11 ] }
[ 4, 7, 8, 10, 11 ]
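One detail worth flagging in the actor-critic script above: Agent.__init__ builds its optimizer from the module-level AC rather than from the model argument it receives, so it only works because AC is constructed before the agent. A minimal self-contained constructor sketch (same behaviour for this script, but without the reliance on a global):

from torch.optim import Adam

class Agent(object):
    def __init__(self, model=None, lr=1e-2, gamma=0.99):
        self.gamma = gamma
        self.AC = model
        # Optimise the model that was passed in, not the global AC.
        self.optimizer = Adam(self.AC.parameters(), lr=lr)
        self.logp_as = []
        self.values = []
        self.rewards = []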
import os import json from threading import Thread import time from time import sleep from flask import Flask, json, render_template, request import redis from collections import OrderedDict import requests from Queue import Queue REGISTRAR_URL = 'http://cuteparty-registrar1.cfapps.io/update' app = Flask(__name__) port = int(os.getenv("PORT")) vcap = json.loads(os.environ['VCAP_SERVICES']) svc = vcap['rediscloud'][0]['credentials'] db = redis.StrictRedis(host=svc["hostname"], port=svc["port"], password=svc["password"],db=0) application_name = json.loads(os.environ['VCAP_APPLICATION'])['application_name'] class Producer(Thread): """ Background thread for fetching instance info """ def __init__(self,queue): """ Constructor """ Thread.__init__(self) self.queue = queue def run(self): """ This is the run implementation of the background thread , which fetchs the instaces info. """ while True : try: instance_id = os.getenv("CF_INSTANCE_INDEX") mydict = db.hgetall(application_name) if instance_id not in mydict : self.queue.put(instance_id) except : pass finally: pass class Consumer(Thread): """ Backgrdound thread for fetching from Queue and updating redis """ def __init__(self,queue): """ Constrcutor """ Thread.__init__(self) self.queue = queue def run(self): """ Run method for background thread which updates redis """ while True : try : instance_id = self.queue.get() db.hset(application_name,instance_id,1) except: pass finally: pass class MasterUpdater(Thread): """ This background thread will update the aggregator/registrar app at provided url """ def __init__(self,db,appname): """ Constructor """ Thread.__init__(self) self.db = db self.appname = appname def run(self): """ Run implementation of background thread which updates the aggregator """ while True : try: appinfo = self.db.hgetall(self.appname) appinfo_str = json.dumps(appinfo) data = {'applicationname':self.appname,'appinfo':appinfo_str} response = requests.post(REGISTRAR_URL, data=data) time.sleep(2) except : pass def init_workers(): """ This method is for starting all worker threads. We are using three workers right now . 1. One for fetching latest instances info and adds to Queue 2. One for fetching from Queue and updating Redis 3. For updating the aggregator app , about this applications info. All are deamon threads. """ party_queue = Queue() p = Producer(party_queue) p.daemon = True c = Consumer(party_queue) c.deamon= True m = MasterUpdater(db,application_name) m.deamon = True p.start() c.start() m.start() @app.route('/addthread') def addthread(): """ This endpoint is for adding threads to the application. Loadbalancer decids to go for which instances and based on that thread is added to it. """ instance_id = os.getenv("CF_INSTANCE_INDEX") print 'Instance Id ****************%s'%instance_id thread_count = int(db.hget(application_name,instance_id)) thread_count+=1 print 'Threadcount ****************%s'%thread_count result = db.hset(application_name,str(instance_id),str(thread_count)) print 'HSET result %s'%result print db.hgetall(application_name) return json.dumps({'message':'success'}) @app.route('/deletethread') def deletethread(): """ This endpoint is for deleting threads to the application. Loadbalancer decids to go for which instances and based on that thread is deleted from it. 
""" instance_id = os.getenv("CF_INSTANCE_INDEX") print 'Instance Id **************%s'%instance_id thread_count = int(db.hget(application_name,instance_id)) thread_count-=1 db.hset(application_name,instance_id,thread_count) return json.dumps({'message':'success'}) @app.route('/instances') def instances(): """ This will list out all the instances and threads per application. An application can see only it's threads and instances. """ mydict = db.hgetall(application_name) ordered = OrderedDict() for key in sorted(mydict): ordered.__setitem__(key,mydict.get(key)) mylist = [] return render_template('robots.html', mydict=ordered) @app.route('/') def index(): """ Main entry point """ return render_template('index.html') if __name__ == "__main__": init_workers() app.run(host='0.0.0.0', port=port, debug=True)
normal
{ "blob_id": "b976dab3c621bb929eb488fa7f4394666efec2ed", "index": 4410, "step-1": "import os\nimport json\nfrom threading import Thread\nimport time\nfrom time import sleep\nfrom flask import Flask, json, render_template, request\nimport redis\nfrom collections import OrderedDict\nimport requests\n\nfrom Queue import Queue\n\nREGISTRAR_URL = 'http://cuteparty-registrar1.cfapps.io/update'\n\napp = Flask(__name__)\nport = int(os.getenv(\"PORT\"))\nvcap = json.loads(os.environ['VCAP_SERVICES'])\nsvc = vcap['rediscloud'][0]['credentials']\n\ndb = redis.StrictRedis(host=svc[\"hostname\"], port=svc[\"port\"], password=svc[\"password\"],db=0)\n\napplication_name = json.loads(os.environ['VCAP_APPLICATION'])['application_name']\n\nclass Producer(Thread):\n \"\"\"\n Background thread for fetching instance info\n \"\"\"\n def __init__(self,queue):\n \"\"\"\n Constructor \n \"\"\"\n Thread.__init__(self)\n self.queue = queue \n def run(self):\n \"\"\"\n This is the run implementation of the background thread , which fetchs the instaces info.\n \"\"\"\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass\nclass Consumer(Thread):\n \"\"\"\n Backgrdound thread for fetching from Queue and updating redis\n \"\"\"\n def __init__(self,queue):\n \"\"\"\n Constrcutor\n \"\"\"\n Thread.__init__(self)\n self.queue = queue\n \n def run(self):\n \"\"\"\n Run method for background thread which updates redis\n \"\"\"\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass\n \nclass MasterUpdater(Thread):\n \"\"\"\n This background thread will update the aggregator/registrar app at provided url\n \"\"\"\n def __init__(self,db,appname):\n \"\"\"\n Constructor\n \"\"\"\n Thread.__init__(self)\n self.db = db\n self.appname = appname\n def run(self):\n \"\"\"\n Run implementation of background thread which updates the aggregator\n \"\"\"\n while True :\n try:\n appinfo = self.db.hgetall(self.appname)\n appinfo_str = json.dumps(appinfo)\n data = {'applicationname':self.appname,'appinfo':appinfo_str}\n response = requests.post(REGISTRAR_URL, data=data)\n time.sleep(2)\n except :\n pass\ndef init_workers():\n \"\"\"\n This method is for starting all worker threads.\n We are using three workers right now .\n 1. One for fetching latest instances info and adds to Queue\n 2. One for fetching from Queue and updating Redis\n 3. For updating the aggregator app , about this applications info.\n All are deamon threads.\n \"\"\"\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()\n \n\[email protected]('/addthread')\ndef addthread():\n \"\"\"\n This endpoint is for adding threads to the application.\n Loadbalancer decids to go for which instances and based on that thread is added to it. 
\n \"\"\"\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n print 'Instance Id ****************%s'%instance_id\n thread_count = int(db.hget(application_name,instance_id))\n thread_count+=1\n print 'Threadcount ****************%s'%thread_count\n result = db.hset(application_name,str(instance_id),str(thread_count))\n print 'HSET result %s'%result\n print db.hgetall(application_name)\n return json.dumps({'message':'success'})\[email protected]('/deletethread')\ndef deletethread():\n \"\"\"\n This endpoint is for deleting threads to the application.\n Loadbalancer decids to go for which instances and based on that thread is deleted from it. \n \"\"\"\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\") \n print 'Instance Id **************%s'%instance_id\n thread_count = int(db.hget(application_name,instance_id))\n thread_count-=1\n db.hset(application_name,instance_id,thread_count)\n \n return json.dumps({'message':'success'})\n\n\[email protected]('/instances')\ndef instances():\n \"\"\"\n This will list out all the instances and threads per application.\n An application can see only it's threads and instances. \n \"\"\"\n mydict = db.hgetall(application_name)\n ordered = OrderedDict()\n for key in sorted(mydict):\n ordered.__setitem__(key,mydict.get(key))\n mylist = []\n return render_template('robots.html', mydict=ordered)\n\n\[email protected]('/')\ndef index():\n \"\"\"\n Main entry point\n \"\"\"\n return render_template('index.html')\n\nif __name__ == \"__main__\":\n init_workers()\n app.run(host='0.0.0.0', port=port, debug=True)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
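The cuteparty service above is written for Python 2 (print statements, Queue module). If it were ported to Python 3, one subtle point is that redis-py then returns bytes keys from hgetall(), so the `if instance_id not in mydict` test in the producer would always fire because str keys never equal bytes keys; asking the client to decode responses avoids that. A hedged sketch of the connection setup only (host, port and password here are placeholders, not the real service credentials):

import redis

# decode_responses=True makes hgetall() return str keys/values,
# so string instance ids compare correctly under Python 3.
db = redis.StrictRedis(host='redis-host', port=6379, password='secret',
                       db=0, decode_responses=True)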
# Color chooser dialog
import tkinter
import tkinter.colorchooser

root = tkinter.Tk()
root.minsize(300,300)

# Add a color selection button
def select():
    # Open the color chooser
    result = tkinter.colorchooser.askcolor(title = 'Underwear color options',initialcolor = 'purple')
    print(result)
    # Change the button color
    btn1['bg'] = result[1]

btn1 = tkinter.Button(root,text = 'Please choose your underwear color',command = select)
btn1.pack()

root.mainloop()
normal
{ "blob_id": "dc261b29c1c11bb8449ff20a7f2fd120bef9efca", "index": 6090, "step-1": "<mask token>\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\n<mask token>\n", "step-2": "<mask token>\nroot.minsize(300, 300)\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\n<mask token>\nbtn1.pack()\nroot.mainloop()\n", "step-3": "<mask token>\nroot = tkinter.Tk()\nroot.minsize(300, 300)\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\nbtn1 = tkinter.Button(root, text='请选择你的内裤颜色', command=select)\nbtn1.pack()\nroot.mainloop()\n", "step-4": "import tkinter\nimport tkinter.colorchooser\nroot = tkinter.Tk()\nroot.minsize(300, 300)\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\nbtn1 = tkinter.Button(root, text='请选择你的内裤颜色', command=select)\nbtn1.pack()\nroot.mainloop()\n", "step-5": "#颜色选择对话框\nimport tkinter\nimport tkinter.colorchooser\n\nroot = tkinter.Tk()\nroot.minsize(300,300)\n\n#添加颜色选择按钮\ndef select():\n #打开颜色选择器\n result = tkinter.colorchooser.askcolor(title = '内裤颜色种类',initialcolor = 'purple')\n print(result)\n #改变按钮颜色\n btn1['bg'] = result[1]\n\nbtn1 = tkinter.Button(root,text = '请选择你的内裤颜色',command = select)\nbtn1.pack()\n\n\n\n\nroot.mainloop()", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
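One more note on askcolor above: it returns a ((r, g, b), '#rrggbb') pair, and both elements are None when the user cancels the dialog, in which case result[1] is not a usable colour. A small hedged sketch of the callback with a guard (button and title text shortened for illustration):

import tkinter
import tkinter.colorchooser

root = tkinter.Tk()
root.minsize(300, 300)

def select():
    rgb, hex_color = tkinter.colorchooser.askcolor(title='Pick a colour',
                                                   initialcolor='purple')
    # Both values are None if the dialog was cancelled.
    if hex_color is not None:
        btn1['bg'] = hex_color

btn1 = tkinter.Button(root, text='Choose a colour', command=select)
btn1.pack()
root.mainloop()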
from __future__ import print_function, division import os from os.path import exists, join, basename, dirname from os import makedirs import numpy as np import datetime import time import argparse import torch import torch.nn as nn import torch.optim as optim from lib.dataloader import DataLoader from lib.im_pair_dataset import ImagePairDataset from lib.normalization import NormalizeImageDict from lib.torch_util import save_checkpoint from lib.torch_util import BatchTensorToVars from lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader # import DCCNet from models.model_dynamic import DCCNet from models.loss_dynamic import weak_loss # Seed and CUDA use_cuda = torch.cuda.is_available() torch.manual_seed(1) if use_cuda: torch.cuda.manual_seed(1) np.random.seed(1) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False print('DCCNet training script') # Argument parsing parser = argparse.ArgumentParser(description='Compute PF Pascal matches') parser.add_argument('--checkpoint', type=str, default='') parser.add_argument('--image_size', type=int, default=400) parser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset') parser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv') parser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs') parser.add_argument('--batch_size', type=int, default=16, help='training batch size') parser.add_argument('--lr', type=float, default=0.0005, help='learning rate') parser.add_argument('--result_model_fn', type=str, default='checkpoint_adam', help='trained model filename') parser.add_argument('--result-model-dir', type=str, default='../model/checkpoints', help='path to trained models folder') parser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune') parser.add_argument('--exp_name', type=str, default='exp_delete', help='experiment name') # DCCNet args parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.') parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. 
cons') parser.add_argument('--sce_kernel_size',type=int,default=25,help='kernel size in sce.') parser.add_argument('--sce_hidden_dim',type=int,default=1024,help='hidden dim in sce') parser.add_argument('--scaleloss_weight',type=float,default=1.0,help='whether use scale loss, if use the weight for scale loss') parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in dynamic fusion net.') parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in dynamic fusion net') args = parser.parse_args() print(args) # Create model print('Creating CNN model...') model = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint, ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.att_scale_ncons_channels, ) #Multi-GPU support model = nn.DataParallel(model) # Set which parts of the model to train if args.fe_finetune_params>0: for i in range(args.fe_finetune_params): for p in model.module.FeatureExtraction.model[-1][-(i+1)].parameters(): p.requires_grad=True print('Trainable parameters:') count = 0 for i,param in enumerate(model.named_parameters()): name,p = param if p.requires_grad: count+=1 print(str(count)+": "+name+"\t"+str(p.shape)+"\t") print(model) # Optimizer print('using Adam optimizer') optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) cnn_image_size=(args.image_size,args.image_size) Dataset = ImagePairDataset train_csv = 'train_pairs.csv' #val_pairs_nocoords.csv: for compute loss, with flip column in csv, no coordinates #val_pairs.csv: for compute pck, with coordinates val_nocoordinates_csv = 'val_pairs_nocoords.csv' val_csv = 'image_pairs/val_pairs.csv' normalization_tnf = NormalizeImageDict(['source_image','target_image']) batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda) # Dataset and dataloader dataset = Dataset(transform=normalization_tnf, dataset_image_path=args.dataset_image_path, dataset_csv_path=args.dataset_csv_path, dataset_csv_file = train_csv, output_size=cnn_image_size, ) dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=0) dataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.dataset_image_path, dataset_csv_path=args.dataset_csv_path, dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size) # compute val loss dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=True, num_workers=4) # compute val pck dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size, eval_dataset_path=args.dataset_image_path, csv_file=val_csv) #load pfpascal val dataset # Define checkpoint name checkpoint_dir = os.path.join(args.result_model_dir,args.exp_name) checkpoint_name = os.path.join(args.result_model_dir,args.exp_name, datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")+'_'+args.result_model_fn + '.pth.tar') log_name = os.path.join(args.result_model_dir,args.exp_name, 'logmain_'+args.exp_name+'.txt') if not exists(dirname(log_name)): makedirs(dirname(log_name)) print('Checkpoint name: '+checkpoint_name) # Train best_val_pck = float("-inf") loss_fn = lambda model,batch: weak_loss(model, batch, normalization='softmax', scaleloss_weight=args.scaleloss_weight) # define epoch function def 
process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,use_cuda=True,log_interval=50): epoch_loss = 0 for batch_idx, batch in enumerate(dataloader): st = time.time() if mode=='train': optimizer.zero_grad() tnf_batch = batch_preprocessing_fn(batch) loss = loss_fn(model,tnf_batch) loss_np = loss.data.cpu().numpy()[0] #loss_np = loss.data.cpu().numpy() epoch_loss += loss_np if mode=='train': loss.backward() optimizer.step() else: loss=None if batch_idx % log_interval == 0: print(mode.capitalize()+' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'.format( epoch, batch_idx , len(dataloader), 100. * batch_idx / len(dataloader), loss_np,time.time()-st)) epoch_loss /= len(dataloader) print(mode.capitalize()+' set: Average loss: {:.12f}'.format(epoch_loss)) return epoch_loss train_loss = np.zeros(args.num_epochs) val_loss = np.zeros(args.num_epochs) val_pcks = np.zeros(args.num_epochs) model.module.FeatureExtraction.eval() print('Starting training...') for epoch in range(1, args.num_epochs+1): st = time.time() train_loss_curepoch = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,log_interval=1) time_train = time.time()-st st = time.time() val_loss_curepoch = process_epoch('val', epoch, model, loss_fn, optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1) time_valloss = time.time()-st st = time.time() val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck,model=model,verbose=False) time_valpck = time.time()-st train_loss[epoch - 1] = train_loss_curepoch val_loss[epoch - 1] = val_loss_curepoch val_pcks[epoch-1] = val_pck_curepoch # remember best loss is_best = val_pcks[epoch - 1] > best_val_pck best_val_pck = max(val_pcks[epoch - 1], best_val_pck) save_checkpoint({ 'epoch': epoch, 'args': args, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(), 'train_loss': train_loss, 'val_loss': val_loss, 'val_pck': val_pcks, 'best_val_pck':best_val_pck, }, is_best,checkpoint_name,save_all_epochs=False) message = 'Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n'.format\ (epoch, train_loss_curepoch, time_train, val_loss_curepoch, time_valloss,val_pck_curepoch,time_valpck,) print(message) with open(log_name, "a") as log_file: log_file.write('%s\n' % message) print('Done!')
normal
{ "blob_id": "0c97569c77fb3598d83eba607960328bb2134dd2", "index": 333, "step-1": "<mask token>\n", "step-2": "<mask token>\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n<mask token>\nprint('DCCNet training script')\n<mask token>\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default=\n 'datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default=\n 'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help=\n 'number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help=\n 'training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default=\n 'checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default=\n '../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help=\n 'number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help=\n 'experiment name')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,\n 5, 5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, \n 16, 1], help='channels in neigh. cons')\nparser.add_argument('--sce_kernel_size', type=int, default=25, help=\n 'kernel size in sce.')\nparser.add_argument('--sce_hidden_dim', type=int, default=1024, help=\n 'hidden dim in sce')\nparser.add_argument('--scaleloss_weight', type=float, default=1.0, help=\n 'whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,\n default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,\n default=[16, 16, 1], help='channels in dynamic fusion net')\n<mask token>\nprint(args)\nprint('Creating CNN model...')\n<mask token>\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(\n ):\n p.requires_grad = True\nprint('Trainable parameters:')\n<mask token>\nfor i, param in enumerate(model.named_parameters()):\n name, p = param\n if p.requires_grad:\n count += 1\n print(str(count) + ': ' + name + '\\t' + str(p.shape) + '\\t')\nprint(model)\nprint('using Adam optimizer')\n<mask token>\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: ' + checkpoint_name)\n<mask token>\n\n\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,\n batch_preprocessing_fn, use_cuda=True, log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n st = time.time()\n if mode == 'train':\n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model, tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() +\n ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'\n .format(epoch, batch_idx, len(dataloader), 100.0 *\n batch_idx / 
len(dataloader), loss_np, time.time() - st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\n\n<mask token>\nmodel.module.FeatureExtraction.eval()\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs + 1):\n st = time.time()\n train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,\n optimizer, dataloader, batch_preprocessing_fn, log_interval=1)\n time_train = time.time() - st\n st = time.time()\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,\n optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n time_valloss = time.time() - st\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=\n model, verbose=False)\n time_valpck = time.time() - st\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch - 1] = val_pck_curepoch\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.\n state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':\n train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,\n 'best_val_pck': best_val_pck}, is_best, checkpoint_name,\n save_all_epochs=False)\n message = (\n \"\"\"Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n\"\"\"\n .format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,\n time_valloss, val_pck_curepoch, time_valpck))\n print(message)\n with open(log_name, 'a') as log_file:\n log_file.write('%s\\n' % message)\nprint('Done!')\n", "step-3": "<mask token>\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nprint('DCCNet training script')\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default=\n 'datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default=\n 'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help=\n 'number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help=\n 'training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default=\n 'checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default=\n '../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help=\n 'number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help=\n 'experiment name')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,\n 5, 5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, \n 16, 1], help='channels in neigh. 
cons')\nparser.add_argument('--sce_kernel_size', type=int, default=25, help=\n 'kernel size in sce.')\nparser.add_argument('--sce_hidden_dim', type=int, default=1024, help=\n 'hidden dim in sce')\nparser.add_argument('--scaleloss_weight', type=float, default=1.0, help=\n 'whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,\n default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,\n default=[16, 16, 1], help='channels in dynamic fusion net')\nargs = parser.parse_args()\nprint(args)\nprint('Creating CNN model...')\nmodel = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.\n ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=\n args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.\n att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.\n att_scale_ncons_channels)\nmodel = nn.DataParallel(model)\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(\n ):\n p.requires_grad = True\nprint('Trainable parameters:')\ncount = 0\nfor i, param in enumerate(model.named_parameters()):\n name, p = param\n if p.requires_grad:\n count += 1\n print(str(count) + ': ' + name + '\\t' + str(p.shape) + '\\t')\nprint(model)\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()\n ), lr=args.lr)\ncnn_image_size = args.image_size, args.image_size\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\nval_nocoordinates_csv = 'val_pairs_nocoords.csv'\nval_csv = 'image_pairs/val_pairs.csv'\nnormalization_tnf = NormalizeImageDict(['source_image', 'target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)\ndataset = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=train_csv, output_size=cnn_image_size)\ndataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=0)\ndataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)\ndataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\ndataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size,\n eval_dataset_path=args.dataset_image_path, csv_file=val_csv)\ncheckpoint_dir = os.path.join(args.result_model_dir, args.exp_name)\ncheckpoint_name = os.path.join(args.result_model_dir, args.exp_name, \n datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.\n result_model_fn + '.pth.tar')\nlog_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +\n args.exp_name + '.txt')\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: ' + checkpoint_name)\nbest_val_pck = float('-inf')\nloss_fn = lambda model, batch: weak_loss(model, batch, normalization=\n 'softmax', scaleloss_weight=args.scaleloss_weight)\n\n\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,\n batch_preprocessing_fn, use_cuda=True, log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n st = time.time()\n if mode == 'train':\n optimizer.zero_grad()\n tnf_batch = 
batch_preprocessing_fn(batch)\n loss = loss_fn(model, tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() +\n ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'\n .format(epoch, batch_idx, len(dataloader), 100.0 *\n batch_idx / len(dataloader), loss_np, time.time() - st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\n\ntrain_loss = np.zeros(args.num_epochs)\nval_loss = np.zeros(args.num_epochs)\nval_pcks = np.zeros(args.num_epochs)\nmodel.module.FeatureExtraction.eval()\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs + 1):\n st = time.time()\n train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,\n optimizer, dataloader, batch_preprocessing_fn, log_interval=1)\n time_train = time.time() - st\n st = time.time()\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,\n optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n time_valloss = time.time() - st\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=\n model, verbose=False)\n time_valpck = time.time() - st\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch - 1] = val_pck_curepoch\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.\n state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':\n train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,\n 'best_val_pck': best_val_pck}, is_best, checkpoint_name,\n save_all_epochs=False)\n message = (\n \"\"\"Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n\"\"\"\n .format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,\n time_valloss, val_pck_curepoch, time_valpck))\n print(message)\n with open(log_name, 'a') as log_file:\n log_file.write('%s\\n' % message)\nprint('Done!')\n", "step-4": "from __future__ import print_function, division\nimport os\nfrom os.path import exists, join, basename, dirname\nfrom os import makedirs\nimport numpy as np\nimport datetime\nimport time\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom lib.dataloader import DataLoader\nfrom lib.im_pair_dataset import ImagePairDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import save_checkpoint\nfrom lib.torch_util import BatchTensorToVars\nfrom lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader\nfrom models.model_dynamic import DCCNet\nfrom models.loss_dynamic import weak_loss\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nprint('DCCNet training script')\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default=\n 'datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default=\n 'datasets/pf-pascal/image_pairs/', 
help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help=\n 'number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help=\n 'training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default=\n 'checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default=\n '../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help=\n 'number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help=\n 'experiment name')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,\n 5, 5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, \n 16, 1], help='channels in neigh. cons')\nparser.add_argument('--sce_kernel_size', type=int, default=25, help=\n 'kernel size in sce.')\nparser.add_argument('--sce_hidden_dim', type=int, default=1024, help=\n 'hidden dim in sce')\nparser.add_argument('--scaleloss_weight', type=float, default=1.0, help=\n 'whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,\n default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,\n default=[16, 16, 1], help='channels in dynamic fusion net')\nargs = parser.parse_args()\nprint(args)\nprint('Creating CNN model...')\nmodel = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.\n ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=\n args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.\n att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.\n att_scale_ncons_channels)\nmodel = nn.DataParallel(model)\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(\n ):\n p.requires_grad = True\nprint('Trainable parameters:')\ncount = 0\nfor i, param in enumerate(model.named_parameters()):\n name, p = param\n if p.requires_grad:\n count += 1\n print(str(count) + ': ' + name + '\\t' + str(p.shape) + '\\t')\nprint(model)\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()\n ), lr=args.lr)\ncnn_image_size = args.image_size, args.image_size\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\nval_nocoordinates_csv = 'val_pairs_nocoords.csv'\nval_csv = 'image_pairs/val_pairs.csv'\nnormalization_tnf = NormalizeImageDict(['source_image', 'target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)\ndataset = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=train_csv, output_size=cnn_image_size)\ndataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=0)\ndataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.\n dataset_image_path, dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)\ndataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\ndataloader_val_pck = 
pfpascal_val_dataloader(image_size=args.image_size,\n eval_dataset_path=args.dataset_image_path, csv_file=val_csv)\ncheckpoint_dir = os.path.join(args.result_model_dir, args.exp_name)\ncheckpoint_name = os.path.join(args.result_model_dir, args.exp_name, \n datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.\n result_model_fn + '.pth.tar')\nlog_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +\n args.exp_name + '.txt')\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: ' + checkpoint_name)\nbest_val_pck = float('-inf')\nloss_fn = lambda model, batch: weak_loss(model, batch, normalization=\n 'softmax', scaleloss_weight=args.scaleloss_weight)\n\n\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,\n batch_preprocessing_fn, use_cuda=True, log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n st = time.time()\n if mode == 'train':\n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model, tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() +\n ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'\n .format(epoch, batch_idx, len(dataloader), 100.0 *\n batch_idx / len(dataloader), loss_np, time.time() - st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\n\ntrain_loss = np.zeros(args.num_epochs)\nval_loss = np.zeros(args.num_epochs)\nval_pcks = np.zeros(args.num_epochs)\nmodel.module.FeatureExtraction.eval()\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs + 1):\n st = time.time()\n train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,\n optimizer, dataloader, batch_preprocessing_fn, log_interval=1)\n time_train = time.time() - st\n st = time.time()\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,\n optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n time_valloss = time.time() - st\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=\n model, verbose=False)\n time_valpck = time.time() - st\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch - 1] = val_pck_curepoch\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.\n state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':\n train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,\n 'best_val_pck': best_val_pck}, is_best, checkpoint_name,\n save_all_epochs=False)\n message = (\n \"\"\"Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n\"\"\"\n .format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,\n time_valloss, val_pck_curepoch, time_valpck))\n print(message)\n with open(log_name, 'a') as log_file:\n log_file.write('%s\\n' % message)\nprint('Done!')\n", "step-5": "from __future__ import print_function, division\nimport os\nfrom os.path import exists, join, basename, dirname\nfrom os import makedirs\nimport numpy as np\nimport datetime\nimport time\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom lib.dataloader import 
DataLoader\nfrom lib.im_pair_dataset import ImagePairDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import save_checkpoint\nfrom lib.torch_util import BatchTensorToVars\nfrom lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader\n\n# import DCCNet\nfrom models.model_dynamic import DCCNet\nfrom models.loss_dynamic import weak_loss\n\n\n# Seed and CUDA\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nprint('DCCNet training script')\n\n# Argument parsing\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help='training batch size')\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\nparser.add_argument('--result_model_fn', type=str, default='checkpoint_adam', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default='../model/checkpoints', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')\nparser.add_argument('--exp_name', type=str, default='exp_delete', help='experiment name')\n\n# DCCNet args\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. 
cons')\n\nparser.add_argument('--sce_kernel_size',type=int,default=25,help='kernel size in sce.')\nparser.add_argument('--sce_hidden_dim',type=int,default=1024,help='hidden dim in sce')\nparser.add_argument('--scaleloss_weight',type=float,default=1.0,help='whether use scale loss, if use the weight for scale loss')\nparser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in dynamic fusion net.')\nparser.add_argument('--att_scale_ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in dynamic fusion net')\n\nargs = parser.parse_args()\nprint(args)\n\n# Create model\nprint('Creating CNN model...')\nmodel = DCCNet(use_cuda=use_cuda,\n checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes,\n ncons_channels=args.ncons_channels,\n sce_kernel_size=args.sce_kernel_size,\n sce_hidden_dim=args.sce_hidden_dim,\n att_scale_ncons_kernel_sizes=args.att_scale_ncons_kernel_sizes,\n att_scale_ncons_channels=args.att_scale_ncons_channels,\n )\n\n#Multi-GPU support\nmodel = nn.DataParallel(model)\n\n# Set which parts of the model to train\nif args.fe_finetune_params>0:\n for i in range(args.fe_finetune_params):\n for p in model.module.FeatureExtraction.model[-1][-(i+1)].parameters():\n p.requires_grad=True\n\nprint('Trainable parameters:')\ncount = 0\nfor i,param in enumerate(model.named_parameters()):\n name,p = param\n if p.requires_grad:\n count+=1\n print(str(count)+\": \"+name+\"\\t\"+str(p.shape)+\"\\t\")\n\nprint(model)\n\n\n# Optimizer\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)\n \ncnn_image_size=(args.image_size,args.image_size)\n\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\n#val_pairs_nocoords.csv: for compute loss, with flip column in csv, no coordinates\n#val_pairs.csv: for compute pck, with coordinates\nval_nocoordinates_csv = 'val_pairs_nocoords.csv'\nval_csv = 'image_pairs/val_pairs.csv'\n\n\nnormalization_tnf = NormalizeImageDict(['source_image','target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda) \n\n# Dataset and dataloader\ndataset = Dataset(transform=normalization_tnf,\n\t dataset_image_path=args.dataset_image_path,\n\t dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file = train_csv,\n output_size=cnn_image_size,\n )\n\ndataloader = DataLoader(dataset, batch_size=args.batch_size,\n shuffle=True, \n num_workers=0)\n\ndataset_val = Dataset(transform=normalization_tnf,\n dataset_image_path=args.dataset_image_path,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=val_nocoordinates_csv,\n output_size=cnn_image_size)\n\n# compute val loss\ndataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\n\n# compute val pck\ndataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size, eval_dataset_path=args.dataset_image_path, csv_file=val_csv) #load pfpascal val dataset\n\n# Define checkpoint name\ncheckpoint_dir = os.path.join(args.result_model_dir,args.exp_name)\ncheckpoint_name = os.path.join(args.result_model_dir,args.exp_name,\n datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")+'_'+args.result_model_fn + '.pth.tar')\nlog_name = os.path.join(args.result_model_dir,args.exp_name, 'logmain_'+args.exp_name+'.txt')\nif not exists(dirname(log_name)):\n makedirs(dirname(log_name))\nprint('Checkpoint name: '+checkpoint_name)\n \n# Train\nbest_val_pck = float(\"-inf\")\n\nloss_fn = lambda model,batch: weak_loss(model, 
batch, normalization='softmax', scaleloss_weight=args.scaleloss_weight)\n\n# define epoch function\ndef process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,use_cuda=True,log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n\n st = time.time()\n\n if mode=='train': \n optimizer.zero_grad()\n tnf_batch = batch_preprocessing_fn(batch)\n loss = loss_fn(model,tnf_batch)\n loss_np = loss.data.cpu().numpy()[0]\n #loss_np = loss.data.cpu().numpy()\n epoch_loss += loss_np\n if mode=='train':\n loss.backward()\n optimizer.step()\n else:\n loss=None\n if batch_idx % log_interval == 0:\n print(mode.capitalize()+' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.12f}\\t\\tcost time: {:.1f}'.format(\n epoch, batch_idx , len(dataloader),\n 100. * batch_idx / len(dataloader), loss_np,time.time()-st))\n epoch_loss /= len(dataloader)\n print(mode.capitalize()+' set: Average loss: {:.12f}'.format(epoch_loss))\n return epoch_loss\n\ntrain_loss = np.zeros(args.num_epochs)\nval_loss = np.zeros(args.num_epochs)\nval_pcks = np.zeros(args.num_epochs)\n\nmodel.module.FeatureExtraction.eval()\n\n\nprint('Starting training...')\nfor epoch in range(1, args.num_epochs+1):\n st = time.time()\n train_loss_curepoch = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,log_interval=1)\n time_train = time.time()-st\n\n st = time.time()\n\n val_loss_curepoch = process_epoch('val', epoch, model, loss_fn, optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)\n\n time_valloss = time.time()-st\n\n st = time.time()\n val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck,model=model,verbose=False)\n time_valpck = time.time()-st\n\n train_loss[epoch - 1] = train_loss_curepoch\n val_loss[epoch - 1] = val_loss_curepoch\n val_pcks[epoch-1] = val_pck_curepoch\n\n # remember best loss\n is_best = val_pcks[epoch - 1] > best_val_pck\n best_val_pck = max(val_pcks[epoch - 1], best_val_pck)\n save_checkpoint({\n 'epoch': epoch,\n 'args': args,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'train_loss': train_loss,\n 'val_loss': val_loss,\n 'val_pck': val_pcks,\n 'best_val_pck':best_val_pck,\n }, is_best,checkpoint_name,save_all_epochs=False)\n\n message = 'Epoch{}\\tTrain_loss{:.6f}\\tcost time{:.1f}\\tVal_loss{:.6f}\\tcost time{:.1f}\\tVal_pck{:.6f}\\tcost time{:.1f}\\n'.format\\\n (epoch, train_loss_curepoch, time_train, val_loss_curepoch, time_valloss,val_pck_curepoch,time_valpck,)\n print(message)\n with open(log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message)\n\n\nprint('Done!')\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
# -*- coding: utf-8 -*-

from ..general.utils import log_errors

from googleapiclient import discovery
from oauth2client.client import SignedJwtAssertionCredentials
from django.conf import settings
from celery import shared_task
from logging import getLogger
import httplib2

_logger = getLogger(__name__)

def create_events_calendar():
    """ Create an events calendar if none already exists. This function mostly exists for
    creating calendars for dev environments, not used in prod.
    """
    service = get_calendar_service()
    if not service:
        return
    calendar = {
        'summary': 'Ting som skjer i Telemarkgruppa',
        'timeZone': 'Europe/Oslo',
    }
    cal_insert_response = service.calendars().insert(body=calendar).execute()
    public_acl = {
        'role': 'reader',
        'scope': {
            'type': 'default'
        }
    }
    acl_insert_response = service.acl().insert(calendarId=cal_insert_response['id'], body=public_acl).execute()
    return acl_insert_response


def get_calendar_service():
    name = 'calendar'
    version = 'v3'
    scope = 'https://www.googleapis.com/auth/calendar'

    # Provide a mock fallback for test environments where real interaction with
    # Google calendar is not needed
    if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):
        _logger.info('Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY '
                     'in settings.')
        return

    # Prepare credentials, and authorize HTTP object with them.
    credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,
                                                settings.GOOGLE_API_PRIVATE_KEY, scope)
    http = credentials.authorize(http=httplib2.Http())

    # Construct a service object via the discovery service.
    service = discovery.build(name, version, http=http)
    return service


@shared_task
@log_errors
def update_google_calendar_event(event_id):
    from .models import Event
    event = Event.objects.get(pk=event_id)

    # If the event doesn't already exist on google calendar, create it
    if not event.google_calendar_id:
        _logger.info('Adding missing event to google calendar: %s', event.name)
        add_google_calender_event(event.id)
        return

    # Authenticate and construct service.
    service = get_calendar_service()

    if not service:
        return

    payload = get_google_calendar_payload_for_event(event)
    results = service.events().update(calendarId=settings.GOOGLE_CALENDAR_ID,
                                      eventId=event.google_calendar_id, body=payload).execute()
    _logger.info('Google calendar event for %s updated: %s', event.name, results)


@shared_task
@log_errors
def add_google_calender_event(event_id):
    from .models import Event
    event = Event.objects.get(pk=event_id)

    if not event:
        _logger.warning('Could not find event to add to Google Calendar: %d', event_id)
        return

    google_payload = get_google_calendar_payload_for_event(event)
    service = get_calendar_service()
    if not service:
        return

    results = service.events().insert(calendarId=settings.GOOGLE_CALENDAR_ID,
                                      body=google_payload).execute()
    if results.get('id'):
        event.google_calendar_id = results['id']
        event.save()
        _logger.info("Google Calendar event for event '%s' created successfully", event.name)
    else:
        _logger.error("New Google Calendar event did not have id in response, was: %s", results)


@shared_task
@log_errors
def delete_google_calendar_event(google_calendar_event_id):
    service = get_calendar_service()
    if not service:
        return

    result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,
                                     eventId=google_calendar_event_id).execute()
    _logger.info('Google calendar event %s deleted: %s', google_calendar_event_id, result)


def get_google_calendar_payload_for_event(event):
    return {
        'summary': event.name,
        'location': event.location,
        'description': event.summary,
        'start': {
            'dateTime': event.startdate.isoformat(),
            'timeZone': 'Europe/Oslo',
        },
        'end': {
            'dateTime': event.enddate.isoformat(),
            'timeZone': 'Europe/Oslo',
        }
    }
normal
{ "blob_id": "36fb0d936be5c5d305c4076fd1c497664c9b770a", "index": 8374, "step-1": "<mask token>\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n<mask token>\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n", "step-2": "<mask token>\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. 
This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event:\n _logger.warning('Could not find event to add to Google Calendar: %d',\n event_id)\n return\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n results = service.events().insert(calendarId=settings.\n GOOGLE_CALENDAR_ID, body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\n \"Google Calendar event for event '%s' created successfully\",\n event.name)\n else:\n _logger.error(\n 'New Google Calendar event did not have id in response, was: %s',\n results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s',\n google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n", "step-3": "<mask token>\n_logger = getLogger(__name__)\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. 
This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event:\n _logger.warning('Could not find event to add to Google Calendar: %d',\n event_id)\n return\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n results = service.events().insert(calendarId=settings.\n GOOGLE_CALENDAR_ID, body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\n \"Google Calendar event for event '%s' created successfully\",\n event.name)\n else:\n _logger.error(\n 'New Google Calendar event did not have id in response, was: %s',\n results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s',\n google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n", "step-4": "from ..general.utils import log_errors\nfrom googleapiclient import discovery\nfrom oauth2client.client import SignedJwtAssertionCredentials\nfrom django.conf import settings\nfrom celery import shared_task\nfrom logging import getLogger\nimport httplib2\n_logger = getLogger(__name__)\n\n\ndef create_events_calendar():\n \"\"\" Create an events calendar if none already exists. 
This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {'summary': 'Ting som skjer i Telemarkgruppa', 'timeZone':\n 'Europe/Oslo'}\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {'role': 'reader', 'scope': {'type': 'default'}}\n acl_insert_response = service.acl().insert(calendarId=\n cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info(\n 'Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY in settings.'\n )\n return\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n service = get_calendar_service()\n if not service:\n return\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.\n GOOGLE_CALENDAR_ID, eventId=event.google_calendar_id, body=payload\n ).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name,\n results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n if not event:\n _logger.warning('Could not find event to add to Google Calendar: %d',\n event_id)\n return\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n results = service.events().insert(calendarId=settings.\n GOOGLE_CALENDAR_ID, body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\n \"Google Calendar event for event '%s' created successfully\",\n event.name)\n else:\n _logger.error(\n 'New Google Calendar event did not have id in response, was: %s',\n results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s',\n google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {'summary': event.name, 'location': event.location,\n 'description': event.summary, 'start': {'dateTime': event.startdate\n .isoformat(), 'timeZone': 'Europe/Oslo'}, 'end': {'dateTime': event\n .enddate.isoformat(), 'timeZone': 'Europe/Oslo'}}\n", "step-5": "# -*- coding: utf-8 -*-\n\nfrom ..general.utils import log_errors\n\nfrom googleapiclient import discovery\nfrom oauth2client.client import SignedJwtAssertionCredentials\nfrom django.conf import settings\nfrom celery import shared_task\nfrom logging import getLogger\nimport httplib2\n\n_logger = getLogger(__name__)\n\ndef create_events_calendar():\n \"\"\" Create an events 
calendar if none already exists. This function mostly exists for\n creating calendars for dev environments, not used in prod.\n \"\"\"\n service = get_calendar_service()\n if not service:\n return\n calendar = {\n 'summary': 'Ting som skjer i Telemarkgruppa',\n 'timeZone': 'Europe/Oslo',\n }\n cal_insert_response = service.calendars().insert(body=calendar).execute()\n public_acl = {\n 'role': 'reader',\n 'scope': {\n 'type': 'default'\n }\n }\n acl_insert_response = service.acl().insert(calendarId=cal_insert_response['id'], body=public_acl).execute()\n return acl_insert_response\n\n\ndef get_calendar_service():\n name = 'calendar'\n version = 'v3'\n scope = 'https://www.googleapis.com/auth/calendar'\n\n # Provide a mock fallback for test environments where real interaction with\n # Google calendar is not needed\n if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):\n _logger.info('Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY '\n 'in settings.')\n return\n\n # Prepare credentials, and authorize HTTP object with them.\n credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,\n settings.GOOGLE_API_PRIVATE_KEY, scope)\n http = credentials.authorize(http=httplib2.Http())\n\n # Construct a service object via the discovery service.\n service = discovery.build(name, version, http=http)\n return service\n\n\n@shared_task\n@log_errors\ndef update_google_calendar_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n\n # If the event doesn't already exist on google calendar, create it\n if not event.google_calendar_id:\n _logger.info('Adding missing event to google calendar: %s', event.name)\n add_google_calender_event(event.id)\n return\n\n # Authenticate and construct service.\n service = get_calendar_service()\n\n if not service:\n return\n\n payload = get_google_calendar_payload_for_event(event)\n results = service.events().update(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=event.google_calendar_id, body=payload).execute()\n _logger.info('Google calendar event for %s updated: %s', event.name, results)\n\n\n@shared_task\n@log_errors\ndef add_google_calender_event(event_id):\n from .models import Event\n event = Event.objects.get(pk=event_id)\n\n if not event:\n _logger.warning('Could not find event to add to Google Calendar: %d', event_id)\n return\n\n google_payload = get_google_calendar_payload_for_event(event)\n service = get_calendar_service()\n if not service:\n return\n\n results = service.events().insert(calendarId=settings.GOOGLE_CALENDAR_ID,\n body=google_payload).execute()\n if results.get('id'):\n event.google_calendar_id = results['id']\n event.save()\n _logger.info(\"Google Calendar event for event '%s' created successfully\", event.name)\n else:\n _logger.error(\"New Google Calendar event did not have id in response, was: %s\", results)\n\n\n@shared_task\n@log_errors\ndef delete_google_calendar_event(google_calendar_event_id):\n service = get_calendar_service()\n if not service:\n return\n\n result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,\n eventId=google_calendar_event_id).execute()\n _logger.info('Google calendar event %s deleted: %s', google_calendar_event_id, result)\n\n\ndef get_google_calendar_payload_for_event(event):\n return {\n 'summary': event.name,\n 'location': event.location,\n 'description': event.summary,\n 'start': {\n 'dateTime': event.startdate.isoformat(),\n 'timeZone': 'Europe/Oslo',\n },\n 'end': {\n 'dateTime': event.enddate.isoformat(),\n 'timeZone': 
'Europe/Oslo',\n }\n }\n", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
import math
import random
from PILL import Image, ImageDraw
for i in range(1,1025):
    pass
    for j in range(1,1025):
        pass
        epipedo[i][j]
for i in range(1,21):
    pass
    im = Image.new("RGB", (512, 512), "white")
    x=random.choice(1,1025)
    y=random.choice(1,1025)
    r=random.choice(10,51)
    draw = ImageDraw.Draw(im)
    draw.ellipse((x-r, y-r, x+r, y+r), fill=(255,255,0), outline ='red')
    for j in range(1,4):#apothikeuw ta stoixeia tou kathe kuklou(kentro kai aktina)
        pass
        if j==1:
            pass
            kukloi[i][1]=x
        if j==2:
            pass
            kukloi[i][2]=y
        if j==3:
            pass
            kukloi[i][3]=r
for i in range(1,21):
    pass
    for k in range(i,20):#sugkrinw kathe kuklo me tous upoloipous xwris na epanalambanontai oi idioi elegxoi
        pass
        a=math.pow(kukloi[k+1][2]-kukloi[i][2], 2)
        b=math.pow(kukloi[k+1][1]-kukloi[i][1], 2)
        d=math.sqrt(a+b)
        if math.fabs(kukloi[i][3]-kykloi[k+1][3])<d and d<kukloi[i][3]+kykloi[k+1][3]:
            pass
            temkuk=0#oi temonomenoi kukloi
            temkuk=temkuk+1
print "temnontai",temkuk, "kukloi"# emfanizei tous temonomenous kuklous
im.show()#kai tin eikona
normal
{ "blob_id": "a2d2ffe5ed6a844341f7ad731357bb837cee4787", "index": 6193, "step-1": "import math\r\nimport random\r\nfrom PILL import Image, ImageDraw\r\nfor i in range(1,1025):\r\n pass\r\n for j in range(1,1025):\r\n pass\r\n epipedo[i][j]\r\nfor i in range(1,21):\r\n pass\r\n im = Image.new(\"RGB\", (512, 512), \"white\")\r\n x=random.choice(1,1025)\r\n y=random.choice(1,1025)\r\n r=random.choice(10,51)\r\n draw = ImageDraw.Draw(im)\r\n draw.ellipse((x-r, y-r, x+r, y+r), fill=(255,255,0), outline ='red')\r\n for j in range(1,4):#apothikeuw ta stoixeia tou kathe kuklou(kentro kai aktina)\r\n pass\r\n if j==1:\r\n pass\r\n kukloi[i][1]=x\r\n if j==2:\r\n pass\r\n kukloi[i][2]=y\r\n if j==3:\r\n pass\r\n kukloi[i][3]=r\r\nfor i in range(1,21):\r\n pass\r\n for k in range(i,20):#sugkrinw kathe kuklo me tous upoloipous xwris na epanalambanontai oi idioi elegxoi\r\n pass\r\n a=math.pow(kukloi[k+1][2]-kukloi[i][2], 2)\r\n b=math.pow(kukloi[k+1][1]-kukloi[i][1], 2)\r\n d=math.sqrt(a+b)\r\n if math.fabs(kukloi[i][3]-kykloi[k+1][3])<d and d<kukloi[i][3]+kykloi[k+1][3]:\r\n pass\r\n temkuk=0#oi temonomenoi kukloi\r\n temkuk=temkuk+1\r\nprint \"temnontai\",temkuk, \"kukloi\"# emfanizei tous temonomenous kuklous\r\nim.show()#kai tin eikona\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# Enunciado: faça um programa que leia um ano qualquer e mostre se ele é BISEXTO.

ano = int(input('\nInforme o ano: '))

ano1 = ano % 4
ano2 = ano % 100

if ano1 == 0 and ano2 != 0:
    print('\nO ano de {} é Bissexto !!'.format(ano))
else:
    print('\nO ano de {} não foi Bissexto !!'.format(ano))
normal
{ "blob_id": "daeb11000978d14a05ea62113dcf6e30d6a98b15", "index": 3590, "step-1": "<mask token>\n", "step-2": "<mask token>\nif ano1 == 0 and ano2 != 0:\n print('\\nO ano de {} é Bissexto !!'.format(ano))\nelse:\n print('\\nO ano de {} não foi Bissexto !!'.format(ano))\n", "step-3": "ano = int(input('\\nInforme o ano: '))\nano1 = ano % 4\nano2 = ano % 100\nif ano1 == 0 and ano2 != 0:\n print('\\nO ano de {} é Bissexto !!'.format(ano))\nelse:\n print('\\nO ano de {} não foi Bissexto !!'.format(ano))\n", "step-4": "# Enunciado: faça um programa que leia um ano qualquer e mostre se ele é BISEXTO.\n\nano = int(input('\\nInforme o ano: '))\n\nano1 = ano % 4\nano2 = ano % 100\n\nif ano1 == 0 and ano2 != 0:\n print('\\nO ano de {} é Bissexto !!'.format(ano))\nelse:\n print('\\nO ano de {} não foi Bissexto !!'.format(ano))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/python3 """ Test of Rectangle class """ from contextlib import redirect_stdout import io import unittest from random import randrange from models.base import Base from models.rectangle import Rectangle from models.square import Square class TestRectangle(unittest.TestCase): """ Test Rectangle methods """ def setUp(self): """ setUp """ Base._Base__nb_objects = 0 def tearDown(self): """ tearDown destroys any existing objects and processes """ pass def test_type(self): """ Test type """ r1 = Rectangle(1, 2) self.assertTrue(type(r1) is Rectangle) def test_inheritance(self): """Tests if Rectangle inherits Base.""" self.assertTrue(issubclass(Rectangle, Base)) def test_constructor_no_args(self): """Tests constructor signature.""" with self.assertRaises(TypeError) as e: r = Rectangle() s = "__init__() missing 2 required positional arguments: 'width' \ and 'height'" self.assertEqual(str(e.exception), s) def test_constructor_many_args(self): """Tests constructor signature.""" with self.assertRaises(TypeError) as e: r = Rectangle(1, 2, 3, 4, 5, 6) s = "__init__() takes from 3 to 6 positional arguments but 7 were \ given" self.assertEqual(str(e.exception), s) def test_constructor_one_args(self): """Tests constructor signature.""" with self.assertRaises(TypeError) as e: r = Rectangle(1) s = "__init__() missing 1 required positional argument: 'height'" self.assertEqual(str(e.exception), s) def test_instantiation(self): """Tests instantiation.""" r = Rectangle(10, 20) self.assertEqual(str(type(r)), "<class 'models.rectangle.Rectangle'>") self.assertTrue(isinstance(r, Base)) d = {'_Rectangle__height': 20, '_Rectangle__width': 10, '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1} self.assertDictEqual(r.__dict__, d) with self.assertRaises(TypeError) as e: r = Rectangle("1", 2) msg = "width must be an integer" self.assertEqual(str(e.exception), msg) with self.assertRaises(TypeError) as e: r = Rectangle(1, "2") msg = "height must be an integer" self.assertEqual(str(e.exception), msg) with self.assertRaises(TypeError) as e: r = Rectangle(1, 2, "3") msg = "x must be an integer" self.assertEqual(str(e.exception), msg) with self.assertRaises(TypeError) as e: r = Rectangle(1, 2, 3, "4") msg = "y must be an integer" self.assertEqual(str(e.exception), msg) with self.assertRaises(ValueError) as e: r = Rectangle(-1, 2) msg = "width must be > 0" self.assertEqual(str(e.exception), msg) with self.assertRaises(ValueError) as e: r = Rectangle(1, -2) msg = "height must be > 0" self.assertEqual(str(e.exception), msg) with self.assertRaises(ValueError) as e: r = Rectangle(0, 2) msg = "width must be > 0" self.assertEqual(str(e.exception), msg) with self.assertRaises(ValueError) as e: r = Rectangle(1, 0) msg = "height must be > 0" self.assertEqual(str(e.exception), msg) with self.assertRaises(ValueError) as e: r = Rectangle(1, 2, -3) msg = "x must be >= 0" self.assertEqual(str(e.exception), msg) with self.assertRaises(ValueError) as e: r = Rectangle(1, 2, 3, -4) msg = "y must be >= 0" self.assertEqual(str(e.exception), msg) def test_id_inherited(self): """Tests if id is inherited from Base.""" Base._Base__nb_objects = 98 r = Rectangle(2, 4) self.assertEqual(r.id, 99) # -- # def test_validate_type(self): """Tests property validation.""" r = Rectangle(1, 2) attributes = ["x", "y", "width", "height"] t = (3.14, -1.1, float('inf'), float('-inf'), True, "str", (2,), [4], {5}, {6: 7}, None) for attribute in attributes: s = "{} must be an integer".format(attribute) for invalid_type in t: with self.assertRaises(TypeError) as 
e: setattr(r, attribute, invalid_type) self.assertEqual(str(e.exception), s) def test_validate_value_negative_gt(self): """Tests property validation.""" r = Rectangle(1, 2) attributes = ["width", "height"] for attribute in attributes: s = "{} must be > 0".format(attribute) with self.assertRaises(ValueError) as e: setattr(r, attribute, -(randrange(10) + 1)) self.assertEqual(str(e.exception), s) def test_validate_value_negative_ge(self): """Tests property validation.""" r = Rectangle(1, 2) attributes = ["x", "y"] for attribute in attributes: s = "{} must be >= 0".format(attribute) with self.assertRaises(ValueError) as e: setattr(r, attribute, -(randrange(10) + 1)) self.assertEqual(str(e.exception), s) def test_validate_value_zero(self): """Tests property validation.""" r = Rectangle(1, 2) attributes = ["width", "height"] for attribute in attributes: s = "{} must be > 0".format(attribute) with self.assertRaises(ValueError) as e: setattr(r, attribute, 0) self.assertEqual(str(e.exception), s) def test_property(self): """Tests property setting/getting.""" r = Rectangle(1, 2) attributes = ["x", "y", "width", "height"] for attribute in attributes: n = randrange(10) + 1 setattr(r, attribute, n) self.assertEqual(getattr(r, attribute), n) def test_property_range_zero(self): """Tests property setting/getting.""" r = Rectangle(1, 2) r.x = 0 r.y = 0 self.assertEqual(r.x, 0) self.assertEqual(r.y, 0) def test_area_no_args(self): """Tests area() method signature.""" r = Rectangle(5, 6) with self.assertRaises(TypeError) as e: Rectangle.area() s = "area() missing 1 required positional argument: 'self'" self.assertEqual(str(e.exception), s) def test_area(self): """Tests area() method compuation.""" r = Rectangle(5, 6) self.assertEqual(r.area(), 30) w = randrange(10) + 1 h = randrange(10) + 1 r.width = w r.height = h self.assertEqual(r.area(), w * h) w = randrange(10) + 1 h = randrange(10) + 1 r = Rectangle(w, h, 7, 8, 9) self.assertEqual(r.area(), w * h) w = randrange(10) + 1 h = randrange(10) + 1 r = Rectangle(w, h, y=7, x=8, id=9) self.assertEqual(r.area(), w * h) def test_display_no_args(self): """Tests display() method signature.""" r = Rectangle(9, 8) with self.assertRaises(TypeError) as e: Rectangle.display() s = "display() missing 1 required positional argument: 'self'" self.assertEqual(str(e.exception), s) def test_display_simple(self): """Tests display() method output.""" r = Rectangle(1, 1) f = io.StringIO() with redirect_stdout(f): r.display() s = "#\n" self.assertEqual(f.getvalue(), s) r.width = 2 r.height = 2 f = io.StringIO() with redirect_stdout(f): r.display() s = "##\n##\n" self.assertEqual(f.getvalue(), s) r = Rectangle(2, 2, 2, 2) f = io.StringIO() with redirect_stdout(f): r.display() s = "\n\n ##\n ##\n" self.assertEqual(f.getvalue(), s) def test_K_str_no_args(self): """Tests __str__() method signature.""" r = Rectangle(5, 2) with self.assertRaises(TypeError) as e: Rectangle.__str__() s = "__str__() missing 1 required positional argument: 'self'" self.assertEqual(str(e.exception), s) def test_K_str(self): """Tests __str__() method return.""" r = Rectangle(5, 2) s = '[Rectangle] (1) 0/0 - 5/2' self.assertEqual(str(r), s) r = Rectangle(1, 1, 1) s = '[Rectangle] (2) 1/0 - 1/1' self.assertEqual(str(r), s) r = Rectangle(3, 4, 5, 6) s = '[Rectangle] (3) 5/6 - 3/4' self.assertEqual(str(r), s) Base._Base__nb_objects = 0 r1 = Rectangle(4, 6, 2, 1, 12) self.assertEqual(str(r1), "[Rectangle] (12) 2/1 - 4/6") r2 = Rectangle(5, 5, 1) self.assertEqual(str(r2), "[Rectangle] (1) 1/0 - 5/5") def 
test_update_no_args(self): """Tests update() method """ r = Rectangle(5, 2) with self.assertRaises(TypeError) as e: Rectangle.update() s = "update() missing 1 required positional argument: 'self'" self.assertEqual(str(e.exception), s) d = r.__dict__.copy() r.update() self.assertEqual(r.__dict__, d) def test_update_args(self): """Tests update() postional args.""" r = Rectangle(5, 2) d = r.__dict__.copy() r.update(10) d["id"] = 10 self.assertEqual(r.__dict__, d) r.update(10, 5) d["_Rectangle__width"] = 5 self.assertEqual(r.__dict__, d) r.update(10, 5, 17) d["_Rectangle__height"] = 17 self.assertEqual(r.__dict__, d) r.update(10, 5, 17, 20) d["_Rectangle__x"] = 20 self.assertEqual(r.__dict__, d) r.update(10, 5, 17, 20, 25) d["_Rectangle__y"] = 25 self.assertEqual(r.__dict__, d) def test_update_args_bad(self): """Tests update() positional arg bad values.""" r = Rectangle(5, 2) d = r.__dict__.copy() r.update(10) d["id"] = 10 self.assertEqual(r.__dict__, d) with self.assertRaises(ValueError) as e: r.update(10, -5) s = "width must be > 0" self.assertEqual(str(e.exception), s) with self.assertRaises(ValueError) as e: r.update(10, 5, -17) s = "height must be > 0" self.assertEqual(str(e.exception), s) with self.assertRaises(ValueError) as e: r.update(10, 5, 17, -20) s = "x must be >= 0" self.assertEqual(str(e.exception), s) with self.assertRaises(ValueError) as e: r.update(10, 5, 17, 20, -25) s = "y must be >= 0" self.assertEqual(str(e.exception), s) def test_update_kwargs(self): """Tests update() keyword args.""" r = Rectangle(5, 2) d = r.__dict__.copy() r.update(id=10) d["id"] = 10 self.assertEqual(r.__dict__, d) r.update(width=5) d["_Rectangle__width"] = 5 self.assertEqual(r.__dict__, d) r.update(height=17) d["_Rectangle__height"] = 17 self.assertEqual(r.__dict__, d) r.update(x=20) d["_Rectangle__x"] = 20 self.assertEqual(r.__dict__, d) r.update(y=25) d["_Rectangle__y"] = 25 self.assertEqual(r.__dict__, d) def test_update_kwargs_2(self): """Tests update() keyword args.""" r = Rectangle(5, 2) d = r.__dict__.copy() r.update(id=10) d["id"] = 10 self.assertEqual(r.__dict__, d) r.update(id=10, width=5) d["_Rectangle__width"] = 5 self.assertEqual(r.__dict__, d) r.update(id=10, width=5, height=17) d["_Rectangle__height"] = 17 self.assertEqual(r.__dict__, d) r.update(id=10, width=5, height=17, x=20) d["_Rectangle__x"] = 20 self.assertEqual(r.__dict__, d) r.update(id=10, width=5, height=17, x=20, y=25) d["_Rectangle__y"] = 25 self.assertEqual(r.__dict__, d) r.update(y=25, id=10, height=17, x=20, width=5) self.assertEqual(r.__dict__, d) Base._Base__nb_objects = 0 r1 = Rectangle(10, 10, 10, 10) self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/10") r1.update(height=1) self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/1") r1.update(width=1, x=2) self.assertEqual(str(r1), "[Rectangle] (1) 2/10 - 1/1") r1.update(y=1, width=2, x=3, id=89) self.assertEqual(str(r1), "[Rectangle] (89) 3/1 - 2/1") r1.update(x=1, height=2, y=3, width=4) self.assertEqual(str(r1), "[Rectangle] (89) 1/3 - 4/2") Base._Base__nb_objects = 0 r1 = Rectangle(10, 10, 10, 10) self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/10") r1.update(89) self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 10/10") r1.update(89, 2) self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 2/10") r1.update(89, 2, 3) self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 2/3") r1.update(89, 2, 3, 4) self.assertEqual(str(r1), "[Rectangle] (89) 4/10 - 2/3") r1.update(89, 2, 3, 4, 5) self.assertEqual(str(r1), "[Rectangle] (89) 4/5 - 2/3") def 
test_to_dictionary(self): """Tests to_dictionary() """ with self.assertRaises(TypeError) as e: Rectangle.to_dictionary() s = "to_dictionary() missing 1 required positional argument: 'self'" self.assertEqual(str(e.exception), s) r = Rectangle(1, 2) d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2} self.assertEqual(r.to_dictionary(), d) r = Rectangle(1, 2, 3, 4, 5) d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2} self.assertEqual(r.to_dictionary(), d) r.x = 10 r.y = 20 r.width = 30 r.height = 40 d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40} self.assertEqual(r.to_dictionary(), d) r1 = Rectangle(10, 2, 1, 9) r1_dictionary = r1.to_dictionary() r2 = Rectangle(1, 1) r2.update(**r1_dictionary) self.assertEqual(str(r1), str(r2)) self.assertNotEqual(r1, r2)
normal
{ "blob_id": "ca00091b7ebcb9ee45b77c919c458c75e3db5b1e", "index": 4783, "step-1": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n <mask token>\n <mask token>\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n 
self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n <mask token>\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n <mask token>\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n 
r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n <mask token>\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n", "step-2": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n <mask token>\n <mask token>\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 
'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n 
\"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as 
e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n <mask token>\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n", "step-3": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n <mask token>\n <mask token>\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n 
self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n \"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n 
def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_kwargs_2(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = 
r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20, y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n r.update(y=25, id=10, height=17, x=20, width=5)\n self.assertEqual(r.__dict__, d)\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(height=1)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/1')\n r1.update(width=1, x=2)\n self.assertEqual(str(r1), '[Rectangle] (1) 2/10 - 1/1')\n r1.update(y=1, width=2, x=3, id=89)\n self.assertEqual(str(r1), '[Rectangle] (89) 3/1 - 2/1')\n r1.update(x=1, height=2, y=3, width=4)\n self.assertEqual(str(r1), '[Rectangle] (89) 1/3 - 4/2')\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(89)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 10/10')\n r1.update(89, 2)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/10')\n r1.update(89, 2, 3)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/3')\n r1.update(89, 2, 3, 4)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/10 - 2/3')\n r1.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/5 - 2/3')\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n", "step-4": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n\n def test_constructor_no_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle()\n s = (\n \"__init__() missing 2 required positional arguments: 'width' and 'height'\"\n )\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_many_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, 4, 5, 6)\n s = (\n '__init__() takes from 3 to 6 positional arguments but 7 were given'\n )\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_one_args(self):\n \"\"\"Tests 
constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n 
self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n \"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n 
d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_kwargs_2(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20, y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n r.update(y=25, id=10, height=17, x=20, width=5)\n self.assertEqual(r.__dict__, d)\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(height=1)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/1')\n r1.update(width=1, x=2)\n self.assertEqual(str(r1), '[Rectangle] (1) 2/10 - 1/1')\n r1.update(y=1, width=2, x=3, id=89)\n self.assertEqual(str(r1), '[Rectangle] (89) 3/1 - 2/1')\n r1.update(x=1, height=2, y=3, width=4)\n self.assertEqual(str(r1), '[Rectangle] (89) 1/3 - 4/2')\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(89)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 10/10')\n r1.update(89, 2)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/10')\n r1.update(89, 2, 3)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/3')\n r1.update(89, 2, 3, 4)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/10 - 2/3')\n r1.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/5 - 2/3')\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 
2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n", "step-5": "#!/usr/bin/python3\n\"\"\"\nTest of Rectangle class\n\"\"\"\nfrom contextlib import redirect_stdout\nimport io\nimport unittest\nfrom random import randrange\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\n\n\nclass TestRectangle(unittest.TestCase):\n \"\"\" Test Rectangle methods \"\"\"\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n\n def test_constructor_no_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle()\n s = \"__init__() missing 2 required positional arguments: 'width' \\\nand 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_many_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, 4, 5, 6)\n s = \"__init__() takes from 3 to 6 positional arguments but 7 were \\\ngiven\"\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(\"1\", 2)\n msg = \"width must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, \"2\")\n msg = \"height must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, \"3\")\n msg = \"x must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, \"4\")\n msg = \"y must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = \"width must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = \"height must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = \"width must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = \"height must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n 
msg = \"x must be >= 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = \"y must be >= 0\"\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n # -- #\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"x\", \"y\", \"width\", \"height\"]\n t = (3.14, -1.1, float('inf'), float('-inf'), True, \"str\", (2,),\n [4], {5}, {6: 7}, None)\n\n for attribute in attributes:\n s = \"{} must be an integer\".format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"width\", \"height\"]\n for attribute in attributes:\n s = \"{} must be > 0\".format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"x\", \"y\"]\n for attribute in attributes:\n s = \"{} must be >= 0\".format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"width\", \"height\"]\n for attribute in attributes:\n s = \"{} must be > 0\".format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"x\", \"y\", \"width\", \"height\"]\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n \"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"#\\n\"\n 
self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"##\\n##\\n\"\n self.assertEqual(f.getvalue(), s)\n\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"\\n\\n ##\\n ##\\n\"\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), \"[Rectangle] (12) 2/1 - 4/6\")\n\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), \"[Rectangle] (1) 1/0 - 5/5\")\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5)\n d[\"_Rectangle__width\"] = 5\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5, 17)\n d[\"_Rectangle__height\"] = 17\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5, 17, 20)\n d[\"_Rectangle__x\"] = 20\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5, 17, 20, 25)\n d[\"_Rectangle__y\"] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = \"width must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = \"height must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = \"x must be >= 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = \"y must be >= 0\"\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(id=10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n r.update(width=5)\n d[\"_Rectangle__width\"] = 5\n self.assertEqual(r.__dict__, d)\n\n r.update(height=17)\n d[\"_Rectangle__height\"] = 17\n self.assertEqual(r.__dict__, d)\n\n r.update(x=20)\n d[\"_Rectangle__x\"] = 20\n self.assertEqual(r.__dict__, d)\n\n r.update(y=25)\n d[\"_Rectangle__y\"] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_kwargs_2(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(id=10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, 
width=5)\n d[\"_Rectangle__width\"] = 5\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, width=5, height=17)\n d[\"_Rectangle__height\"] = 17\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, width=5, height=17, x=20)\n d[\"_Rectangle__x\"] = 20\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, width=5, height=17, x=20, y=25)\n d[\"_Rectangle__y\"] = 25\n self.assertEqual(r.__dict__, d)\n\n r.update(y=25, id=10, height=17, x=20, width=5)\n self.assertEqual(r.__dict__, d)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), \"[Rectangle] (1) 10/10 - 10/10\")\n\n r1.update(height=1)\n self.assertEqual(str(r1), \"[Rectangle] (1) 10/10 - 10/1\")\n\n r1.update(width=1, x=2)\n self.assertEqual(str(r1), \"[Rectangle] (1) 2/10 - 1/1\")\n\n r1.update(y=1, width=2, x=3, id=89)\n self.assertEqual(str(r1), \"[Rectangle] (89) 3/1 - 2/1\")\n\n r1.update(x=1, height=2, y=3, width=4)\n self.assertEqual(str(r1), \"[Rectangle] (89) 1/3 - 4/2\")\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), \"[Rectangle] (1) 10/10 - 10/10\")\n\n r1.update(89)\n self.assertEqual(str(r1), \"[Rectangle] (89) 10/10 - 10/10\")\n\n r1.update(89, 2)\n self.assertEqual(str(r1), \"[Rectangle] (89) 10/10 - 2/10\")\n\n r1.update(89, 2, 3)\n self.assertEqual(str(r1), \"[Rectangle] (89) 10/10 - 2/3\")\n\n r1.update(89, 2, 3, 4)\n self.assertEqual(str(r1), \"[Rectangle] (89) 4/10 - 2/3\")\n\n r1.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r1), \"[Rectangle] (89) 4/5 - 2/3\")\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n", "step-ids": [ 23, 25, 26, 28, 31 ] }
[ 23, 25, 26, 28, 31 ]
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsOpacityEffect, \
    QPushButton
from PyQt5.QtCore import Qt


class ToolBar(QWidget):
    """
    Window for entering parameters
    """

    def __init__(self, parent):
        super().__init__(parent)
        self._main_wnd = parent

        self.setAttribute(Qt.WA_StyledBackground, True)
        self.setObjectName("options")
        self.setStyleSheet("""
            #options, #closeButton {
                border-radius: 6px;
                background-color: rgb(0, 0, 0);
                color: #fff;
            }
            QToolBar {
                background-color: rgb(0, 0, 0);
                color: #fff;
            }
            """)

        self.setupWidgets()

        effect = QGraphicsOpacityEffect()
        effect.setOpacity(0.66)
        self.setGraphicsEffect(effect)

        self.setMinimumWidth(220)
        self.updateWidgets()
        self.connectSignals()

        self.setAcceptDrops(True)

    def mainWnd(self):
        return self._main_wnd

    def setupWidgets(self):
        self._layout = QHBoxLayout()
        self._layout.setContentsMargins(6, 5, 12, 12)
        self._layout.setSpacing(0)

        self._open_file = self.addButton("O", self._main_wnd.onOpenFile)
        self._layout.addSpacing(8)
        self._add_text = self.addButton("T", self._main_wnd.onAddText)

        self._layout.addStretch()

        self.setLayout(self._layout)

    def addButton(self, text, action):
        button = QPushButton(text)
        button.clicked.connect(action)
        self._layout.addWidget(button)
        return button

    def connectSignals(self):
        pass

    def updateWidgets(self):
        pass
normal
{ "blob_id": "772e2e0a442c1b63330e9b526b76d767646b0c7c", "index": 7819, "step-1": "<mask token>\n\n\nclass ToolBar(QWidget):\n <mask token>\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n <mask token>\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass ToolBar(QWidget):\n <mask token>\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n <mask token>\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n <mask token>\n\n def updateWidgets(self):\n pass\n", "step-3": "<mask token>\n\n\nclass ToolBar(QWidget):\n <mask token>\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n\n def mainWnd(self):\n return self._main_wnd\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n 
button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n <mask token>\n\n def updateWidgets(self):\n pass\n", "step-4": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsOpacityEffect, QPushButton\nfrom PyQt5.QtCore import Qt\n\n\nclass ToolBar(QWidget):\n \"\"\"\n Window for entering parameters\n \"\"\"\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n\n def mainWnd(self):\n return self._main_wnd\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n\n def connectSignals(self):\n pass\n\n def updateWidgets(self):\n pass\n", "step-5": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsOpacityEffect, \\\n QPushButton\nfrom PyQt5.QtCore import Qt\n\n\nclass ToolBar(QWidget):\n \"\"\"\n Window for entering parameters\n \"\"\"\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName(\"options\")\n self.setStyleSheet(\"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\")\n\n self.setupWidgets()\n\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n\n self.setAcceptDrops(True)\n\n def mainWnd(self):\n return self._main_wnd\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n\n self._open_file = self.addButton(\"O\", self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton(\"T\", self._main_wnd.onAddText)\n\n self._layout.addStretch()\n\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n\n def connectSignals(self):\n pass\n\n def updateWidgets(self):\n pass\n", "step-ids": [ 3, 5, 6, 9, 10 ] }
[ 3, 5, 6, 9, 10 ]
# -*- encoding: utf-8 -*-
#----------------------------------------------------------------------------
#
# Copyright (C) 2014 .
# Coded by: Borni DHIFI ([email protected])
#
#----------------------------------------------------------------------------

import models
import wizard
import parser

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
normal
{ "blob_id": "a3216aa41cd28b91653b99017e21a03e43372e9b", "index": 4137, "step-1": "<mask token>\n", "step-2": "import models\nimport wizard\nimport parser\n", "step-3": "# -*- encoding: utf-8 -*-\n#----------------------------------------------------------------------------\n#\n# Copyright (C) 2014 .\n# Coded by: Borni DHIFI ([email protected])\n#\n#----------------------------------------------------------------------------\n\nimport models\nimport wizard\nimport parser\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# Copyright 2014 Charles Noneman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test suite for running the test modules"""

from __future__ import print_function

import importlib
import pkgutil
import unittest
import test


def run():
    """Runs all of the tests"""
    subsuite_list = []
    for _, modname, _ in pkgutil.iter_modules(test.__path__):
        if modname.startswith("test_"):
            module = importlib.import_module('test.' + modname)
            subsuite = unittest.TestLoader().loadTestsFromModule(module)
            subsuite_list.append(subsuite)
    suite = unittest.TestSuite(subsuite_list)

    print("Testing:\n")
    unittest.TextTestRunner(verbosity=2).run(suite)


if __name__ == '__main__':
    run()
normal
{ "blob_id": "9a7908212bf13565109cd4d9ab6de65909bc6910", "index": 3606, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef run():\n \"\"\"Runs all of the tests\"\"\"\n subsuite_list = []\n for _, modname, _ in pkgutil.iter_modules(test.__path__):\n if modname.startswith('test_'):\n module = importlib.import_module('test.' + modname)\n subsuite = unittest.TestLoader().loadTestsFromModule(module)\n subsuite_list.append(subsuite)\n suite = unittest.TestSuite(subsuite_list)\n print('Testing:\\n')\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef run():\n \"\"\"Runs all of the tests\"\"\"\n subsuite_list = []\n for _, modname, _ in pkgutil.iter_modules(test.__path__):\n if modname.startswith('test_'):\n module = importlib.import_module('test.' + modname)\n subsuite = unittest.TestLoader().loadTestsFromModule(module)\n subsuite_list.append(subsuite)\n suite = unittest.TestSuite(subsuite_list)\n print('Testing:\\n')\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == '__main__':\n run()\n", "step-4": "<mask token>\nfrom __future__ import print_function\nimport importlib\nimport pkgutil\nimport unittest\nimport test\n\n\ndef run():\n \"\"\"Runs all of the tests\"\"\"\n subsuite_list = []\n for _, modname, _ in pkgutil.iter_modules(test.__path__):\n if modname.startswith('test_'):\n module = importlib.import_module('test.' + modname)\n subsuite = unittest.TestLoader().loadTestsFromModule(module)\n subsuite_list.append(subsuite)\n suite = unittest.TestSuite(subsuite_list)\n print('Testing:\\n')\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == '__main__':\n run()\n", "step-5": "# Copyright 2014 Charles Noneman\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test suite for running the test modules\"\"\"\n\nfrom __future__ import print_function\n\nimport importlib\nimport pkgutil\nimport unittest\nimport test\n\ndef run():\n\t\"\"\"Runs all of the tests\"\"\"\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)\n\nif __name__ == '__main__':\n\trun()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""Sorting components: peak waveform features.""" import numpy as np from spikeinterface.core.job_tools import fix_job_kwargs from spikeinterface.core import get_channel_distances from spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeMonopolarTriangulation from spikeinterface.sortingcomponents.peak_pipeline import run_peak_pipeline, PipelineNode, ExtractDenseWaveforms def compute_features_from_peaks( recording, peaks, feature_list=["ptp", ], feature_params={}, ms_before=1., ms_after=1., **job_kwargs, ): """Extract features on the fly from the recording given a list of peaks. Parameters ---------- recording: RecordingExtractor The recording extractor object. peaks: array Peaks array, as returned by detect_peaks() in "compact_numpy" way. feature_list: List of features to be computed. - amplitude - ptp - com - energy ms_before: float The duration in ms before the peak for extracting the features (default 1 ms) ms_after: float The duration in ms after the peakfor extracting the features (default 1 ms) {} Returns ------- A tuple of features. Even if there is one feature. Every feature have shape[0] == peaks.shape[0]. dtype and other dim depends on features. """ job_kwargs = fix_job_kwargs(job_kwargs) extract_dense_waveforms = ExtractDenseWaveforms(recording, ms_before=ms_before, ms_after=ms_after, return_output=False) nodes = [ extract_dense_waveforms, ] for feature_name in feature_list: Class = _features_class[feature_name] params = feature_params.get(feature_name, {}).copy() node = Class(recording, parents=[extract_dense_waveforms], **params) nodes.append(node) features = run_peak_pipeline(recording, peaks, nodes, job_kwargs, job_name='features_from_peaks', squeeze_output=False) return features class AmplitudeFeature(PipelineNode): def __init__(self, recording, name='amplitude_feature', return_output=True, parents=None, all_channels=False, peak_sign='neg'): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.all_channels = all_channels self.peak_sign = peak_sign self._kwargs.update(dict(all_channels=all_channels, peak_sign=peak_sign)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): if self.all_channels: if self.peak_sign == 'neg': amplitudes = np.min(waveforms, axis=1) elif self.peak_sign == 'pos': amplitudes = np.max(waveforms, axis=1) elif self.peak_sign == 'both': amplitudes = np.max(np.abs(waveforms, axis=1)) else: if self.peak_sign == 'neg': amplitudes = np.min(waveforms, axis=(1, 2)) elif self.peak_sign == 'pos': amplitudes = np.max(waveforms, axis=(1, 2)) elif self.peak_sign == 'both': amplitudes = np.max(np.abs(waveforms), axis=(1, 2)) return amplitudes class PeakToPeakFeature(PipelineNode): def __init__(self, recording, name='ptp_feature', return_output=True, parents=None, local_radius_um=150., all_channels=True): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self.all_channels = all_channels self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): if self.all_channels: all_ptps = np.ptp(waveforms, axis=1) else: all_ptps = np.zeros(peaks.size) for main_chan in 
np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) wfs = waveforms[idx][:, :, chan_inds] all_ptps[idx] = np.max(np.ptp(wfs, axis=1)) return all_ptps class PeakToPeakLagsFeature(PipelineNode): def __init__(self, recording, name='ptp_lag_feature', return_output=True, parents=None, local_radius_um=150., all_channels=True): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.all_channels = all_channels self.local_radius_um = local_radius_um self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): if self.all_channels: all_maxs = np.argmax(waveforms, axis=1) all_mins = np.argmin(waveforms, axis=1) all_lags = all_maxs - all_mins else: all_lags = np.zeros(peaks.size) for main_chan in np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) wfs = waveforms[idx][:, :, chan_inds] maxs = np.argmax(wfs, axis=1) mins = np.argmin(wfs, axis=1) lags = maxs - mins ptps = np.argmax(np.ptp(wfs, axis=1), axis=1) all_lags[idx] = lags[np.arange(len(idx)), ptps] return all_lags class RandomProjectionsFeature(PipelineNode): def __init__(self, recording, name='random_projections_feature', return_output=True, parents=None, projections=None, local_radius_um=150., min_values=None): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.projections = projections self.local_radius_um = local_radius_um self.min_values = min_values self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self._kwargs.update(dict(projections=projections, local_radius_um=local_radius_um, min_values=min_values)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype) for main_chan in np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) local_projections = self.projections[chan_inds, :] wf_ptp = (waveforms[idx][:, :, chan_inds]).ptp(axis=1) if self.min_values is not None: wf_ptp = (wf_ptp/self.min_values[chan_inds])**4 denom = np.sum(wf_ptp, axis=1) mask = denom != 0 all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections)/(denom[mask][:, np.newaxis]) return all_projections class RandomProjectionsEnergyFeature(PipelineNode): def __init__(self, recording, name='random_projections_energy_feature', return_output=True, parents=None, projections=None, local_radius_um=150., min_values=None): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self.projections = projections self.min_values = min_values self.local_radius_um = local_radius_um self._kwargs.update(dict(projections=projections, 
min_values=min_values, local_radius_um=local_radius_um)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype) for main_chan in np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) local_projections = self.projections[chan_inds, :] energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1) if self.min_values is not None: energies = (energies/self.min_values[chan_inds])**4 denom = np.sum(energies, axis=1) mask = denom != 0 all_projections[idx[mask]] = np.dot(energies[mask], local_projections)/(denom[mask][:, np.newaxis]) return all_projections class StdPeakToPeakFeature(PipelineNode): def __init__(self, recording, name='std_ptp_feature', return_output=True, parents=None, local_radius_um=150.): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self._kwargs.update(dict(local_radius_um=local_radius_um)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): all_ptps = np.zeros(peaks.size) for main_chan in np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) wfs = waveforms[idx][:, :, chan_inds] all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1) return all_ptps class GlobalPeakToPeakFeature(PipelineNode): def __init__(self, recording, name='global_ptp_feature', return_output=True, parents=None, local_radius_um=150.): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self._kwargs.update(dict(local_radius_um=local_radius_um)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): all_ptps = np.zeros(peaks.size) for main_chan in np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) wfs = waveforms[idx][:, :, chan_inds] all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2)) return all_ptps class KurtosisPeakToPeakFeature(PipelineNode): def __init__(self, recording, name='kurtosis_ptp_feature', return_output=True, parents=None, local_radius_um=150.): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self._kwargs.update(dict(local_radius_um=local_radius_um)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype def compute(self, traces, peaks, waveforms): all_ptps = np.zeros(peaks.size) import scipy for main_chan in np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) wfs = waveforms[idx][:, :, chan_inds] all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1) return all_ptps class 
EnergyFeature(PipelineNode): def __init__(self, recording, name='energy_feature', return_output=True, parents=None, local_radius_um=50.): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < local_radius_um self._kwargs.update(dict(local_radius_um=local_radius_um)) def get_dtype(self): return np.dtype('float32') def compute(self, traces, peaks, waveforms): energy = np.zeros(peaks.size, dtype='float32') for main_chan in np.unique(peaks['channel_ind']): idx, = np.nonzero(peaks['channel_ind'] == main_chan) chan_inds, = np.nonzero(self.neighbours_mask[main_chan]) wfs = waveforms[idx][:, :, chan_inds] energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size return energy _features_class = { 'amplitude': AmplitudeFeature, 'ptp' : PeakToPeakFeature, 'center_of_mass' : LocalizeCenterOfMass, 'monopolar_triangulation' : LocalizeMonopolarTriangulation, 'energy' : EnergyFeature, 'std_ptp' : StdPeakToPeakFeature, 'kurtosis_ptp' : KurtosisPeakToPeakFeature, 'random_projections_ptp' : RandomProjectionsFeature, 'random_projections_energy' : RandomProjectionsEnergyFeature, 'ptp_lag' : PeakToPeakLagsFeature, 'global_ptp' : GlobalPeakToPeakFeature }
normal
{ "blob_id": "6fe22b3f98bff1a9b775fce631ae94a4ee22b04c", "index": 4371, "step-1": "<mask token>\n\n\nclass RandomProjectionsFeature(PipelineNode):\n <mask token>\n\n def get_dtype(self):\n return self._dtype\n <mask token>\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, 
chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass 
RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, 
parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_lag_feature', return_output=\n True, parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = 
local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n 
return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass PeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_feature', return_output=True,\n parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.all_channels = all_channels\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = 
recording.get_dtype()\n <mask token>\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_ptps = np.ptp(waveforms, axis=1)\n else:\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(np.ptp(wfs, axis=1))\n return all_ptps\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_lag_feature', return_output=\n True, parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, 
recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n 
self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n", "step-5": "\"\"\"Sorting components: peak waveform features.\"\"\"\nimport numpy as np\n\nfrom spikeinterface.core.job_tools import fix_job_kwargs\nfrom spikeinterface.core import get_channel_distances\nfrom spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeMonopolarTriangulation\nfrom spikeinterface.sortingcomponents.peak_pipeline import run_peak_pipeline, PipelineNode, ExtractDenseWaveforms\n\n\n\ndef compute_features_from_peaks(\n recording,\n peaks,\n feature_list=[\"ptp\", ],\n feature_params={},\n ms_before=1.,\n ms_after=1.,\n **job_kwargs,\n):\n \"\"\"Extract features on the fly from the recording given a list of peaks. \n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor object.\n peaks: array\n Peaks array, as returned by detect_peaks() in \"compact_numpy\" way.\n feature_list: List of features to be computed.\n - amplitude\n - ptp\n - com\n - energy\n ms_before: float\n The duration in ms before the peak for extracting the features (default 1 ms)\n ms_after: float\n The duration in ms after the peakfor extracting the features (default 1 ms)\n\n {}\n\n Returns\n -------\n A tuple of features. 
Even if there is one feature.\n Every feature have shape[0] == peaks.shape[0].\n dtype and other dim depends on features.\n\n \"\"\"\n job_kwargs = fix_job_kwargs(job_kwargs)\n\n extract_dense_waveforms = ExtractDenseWaveforms(recording, ms_before=ms_before, ms_after=ms_after, return_output=False)\n nodes = [\n extract_dense_waveforms,\n ]\n for feature_name in feature_list:\n Class = _features_class[feature_name]\n params = feature_params.get(feature_name, {}).copy()\n node = Class(recording, parents=[extract_dense_waveforms], **params)\n nodes.append(node)\n\n features = run_peak_pipeline(recording, peaks, nodes, job_kwargs, job_name='features_from_peaks', squeeze_output=False)\n\n return features\n\n\nclass AmplitudeFeature(PipelineNode):\n def __init__(self, recording, name='amplitude_feature', return_output=True, parents=None, \n all_channels=False, peak_sign='neg'):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.all_channels = all_channels\n self.peak_sign = peak_sign\n self._kwargs.update(dict(all_channels=all_channels, peak_sign=peak_sign))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n if self.peak_sign == 'neg':\n amplitudes = np.min(waveforms, axis=1)\n elif self.peak_sign == 'pos':\n amplitudes = np.max(waveforms, axis=1)\n elif self.peak_sign == 'both':\n amplitudes = np.max(np.abs(waveforms, axis=1))\n else:\n if self.peak_sign == 'neg':\n amplitudes = np.min(waveforms, axis=(1, 2))\n elif self.peak_sign == 'pos':\n amplitudes = np.max(waveforms, axis=(1, 2))\n elif self.peak_sign == 'both':\n amplitudes = np.max(np.abs(waveforms), axis=(1, 2))\n return amplitudes\n\n\nclass PeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='ptp_feature', return_output=True, parents=None,\n local_radius_um=150., all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.all_channels = all_channels\n self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_ptps = np.ptp(waveforms, axis=1)\n else:\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(np.ptp(wfs, axis=1))\n return all_ptps\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n def __init__(self, recording, name='ptp_lag_feature', return_output=True, parents=None,\n local_radius_um=150., all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return 
self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature', return_output=True, parents=None,\n projections=None, local_radius_um=150., min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(projections=projections, local_radius_um=local_radius_um, min_values=min_values))\n \n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = (waveforms[idx][:, :, chan_inds]).ptp(axis=1)\n\n if self.min_values is not None:\n wf_ptp = (wf_ptp/self.min_values[chan_inds])**4\n\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections)/(denom[mask][:, np.newaxis])\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n def __init__(self, recording, name='random_projections_energy_feature', return_output=True, parents=None,\n projections=None, local_radius_um=150., min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n\n if self.min_values is not None:\n energies = (energies/self.min_values[chan_inds])**4\n\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n\n all_projections[idx[mask]] = np.dot(energies[mask], 
local_projections)/(denom[mask][:, np.newaxis])\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='std_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='global_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='kurtosis_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n def __init__(self, recording, name='energy_feature', return_output=True, parents=None,\n local_radius_um=50.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, 
waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n_features_class = {\n 'amplitude': AmplitudeFeature,\n 'ptp' : PeakToPeakFeature,\n 'center_of_mass' : LocalizeCenterOfMass,\n 'monopolar_triangulation' : LocalizeMonopolarTriangulation,\n 'energy' : EnergyFeature,\n 'std_ptp' : StdPeakToPeakFeature,\n 'kurtosis_ptp' : KurtosisPeakToPeakFeature,\n 'random_projections_ptp' : RandomProjectionsFeature,\n 'random_projections_energy' : RandomProjectionsEnergyFeature,\n 'ptp_lag' : PeakToPeakLagsFeature,\n 'global_ptp' : GlobalPeakToPeakFeature\n}", "step-ids": [ 22, 24, 28, 31, 40 ] }
[ 22, 24, 28, 31, 40 ]
# Generated by Django 2.2.7 on 2019-11-15 23:43 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('quizzapp', '0005_auto_20191115_2339'), ] operations = [ migrations.RemoveField( model_name='question', name='titre', ), ]
normal
{ "blob_id": "b2fa6104f03dc76522a51f352101cef199ddc665", "index": 675, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('quizzapp', '0005_auto_20191115_2339')]\n operations = [migrations.RemoveField(model_name='question', name='titre')]\n", "step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('quizzapp', '0005_auto_20191115_2339')]\n operations = [migrations.RemoveField(model_name='question', name='titre')]\n", "step-5": "# Generated by Django 2.2.7 on 2019-11-15 23:43\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quizzapp', '0005_auto_20191115_2339'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='question',\n name='titre',\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# block-comments.py ''' Block comments generally apply to some (or all) code that follows them, and are indented to the same level as that code. Each line of a block comment starts with a # and a single space (unless it is indented text inside the comment). Paragraphs inside a block comment are separated by a line containing a single #. '''
normal
{ "blob_id": "83bac8176caafc5551089c4bef5c1f38e1e8d4da", "index": 5952, "step-1": "<mask token>\n", "step-2": "# block-comments.py\n'''\nBlock comments generally apply to some (or all) code that follows them, and are\nindented to the same level as that code. Each line of a block comment starts\nwith a # and a single space (unless it is indented text inside the comment).\n\nParagraphs inside a block comment are separated by a line containing a single #.\n'''\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
def first_repeat(chars): for x in chars: if chars.count(x) > 1: return x return '-1'
normal
{ "blob_id": "bf683f8e7fb5ad5f7cd915a8a01d9adf7d13e739", "index": 3375, "step-1": "<mask token>\n", "step-2": "def first_repeat(chars):\n for x in chars:\n if chars.count(x) > 1:\n return x\n return '-1'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
""" Common, pure functions used by the D-BAS. .. codeauthor:: Tobias Krauthoff <[email protected] """ import hashlib import locale import os import re import warnings from collections import defaultdict from datetime import datetime from enum import Enum, auto from html import escape, unescape from typing import List from urllib import parse from uuid import uuid4 from sqlalchemy import func from dbas.database import DBDiscussionSession from dbas.database.discussion_model import Argument, Premise, Statement, TextVersion, Issue, User, Settings, \ ClickedArgument, ClickedStatement, MarkedArgument, MarkedStatement, PremiseGroup from dbas.logger import logger from dbas.strings.keywords import Keywords as _ from dbas.strings.translator import Translator nick_of_anonymous_user = 'anonymous' fallback_lang = 'en' tag_type = 'span' start_attack = '<{} data-argumentation-type="attack">'.format(tag_type) start_argument = '<{} data-argumentation-type="argument">'.format(tag_type) start_position = '<{} data-argumentation-type="position">'.format(tag_type) start_content = '<{} class="triangle-content-text">'.format(tag_type) start_pro = '<{} data-attitude="pro">'.format(tag_type) start_con = '<{} data-attitude="con">'.format(tag_type) start_tag = '<{}>'.format(tag_type) end_tag = '</{}>'.format(tag_type) class BubbleTypes(Enum): USER = auto() SYSTEM = auto() STATUS = auto() INFO = auto() def __str__(self): return str(self.value) class Relations(Enum): UNDERMINE = 'undermine' UNDERCUT = 'undercut' REBUT = 'rebut' SUPPORT = 'support' def __str__(self): return str(self.value) class Attitudes(Enum): AGREE = 'agree' DISAGREE = 'disagree' DONT_KNOW = 'dontknow' def __str__(self): return str(self.value) relation_mapper = {relation.value: relation for relation in Relations} attitude_mapper = {attitude.value: attitude for attitude in Attitudes} def get_global_url(): """ Returns the global url of the project, based on the ENV :return: String """ return os.environ.get('URL', '') def get_changelog(no): """ Returns the 'no' last entries from the changelog :param no: int :return: list """ path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md')) lines = [line.rstrip('\n').strip() for line in open(path) if len(line.rstrip('\n').strip()) > 0] changelog = [] title = '' body = [] for l in lines: if l.startswith('#'): if len(title) > 0: changelog.append({'title': title, 'body': body}) body = [] title = l.replace('### ', '') else: body.append(l.replace('- ', '')) return changelog[0:no] def is_development_mode(registry): """ Returns true, if mode is set to development in current ini file. :param registry: request.registry :return: Boolean """ if 'mode' in registry.settings: return registry.settings['mode'].lower() == 'development' return False def usage_of_modern_bubbles(registry): """ Returns true, if modern bubbles are set in the current ini file. :param registry: request.registry :return: Boolean """ if 'modern_bubbles' in registry.settings: return registry.settings['modern_bubbles'].lower() == 'true' return False def usage_of_matomo(registry): """ Returns true, if matomo is set in the current ini file. :param registry: request.registry :return: Boolean """ if 'mode' in registry.settings: return registry.settings['usage_of_matomo'].lower() == 'true' return False def escape_string(text): """ Escapes all html special chars. 
:param text: string :return: html.escape(text) """ return escape(text) def get_discussion_language(matchdict, params, session, current_issue_uid=None): """ Returns Language.ui_locales CALL AFTER issue_handler.get_id_of_slug(..)! :param matchdict: matchdict of the current request :param params: params of the current request :param session: session of the current request :param current_issue_uid: uid :return: """ if not current_issue_uid: current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False, Issue.is_private == False).first() current_issue_uid = current_issue.uid if current_issue else None # first matchdict, then params, then session, afterwards fallback issue = matchdict['issue'] if 'issue' in matchdict \ else params['issue'] if 'issue' in params \ else session['issue'] if 'issue' in session \ else current_issue_uid db_issue = DBDiscussionSession.query(Issue).get(issue) return db_issue.lang if db_issue else 'en' def python_datetime_pretty_print(ts, lang): """ Pretty print of a locale :param ts: Timestamp :param lang: ui_locales :return: String """ formatter = '%b. %d.' if lang == 'de': try: locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8') formatter = '%d. %b.' except locale.Error: locale.setlocale(locale.LC_TIME, 'en_US.UTF8') return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter) def get_all_arguments_by_statement(statement_uid, include_disabled=False): """ Returns a list of all arguments where the statement is a conclusion or member of the premisegroup :param statement_uid: Statement.uid :param include_disabled: Boolean :return: [Arguments] """ logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid, include_disabled)) db_arguments = __get_arguments_of_conclusion(statement_uid, include_disabled) arg_array = [arg for arg in db_arguments] if db_arguments else [] premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement_uid) if not include_disabled: premises = premises.filter_by(is_disabled=False) premises = premises.all() for premise in premises: arg_array += __get_argument_of_premisegroup(premise.premisegroup_uid, include_disabled) db_undercuts = [] for arg in arg_array: db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled) db_undercutted_undercuts = [] for arg in db_undercuts: db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled) arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts)) logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in arg_array])) return arg_array if len(arg_array) > 0 else None def __get_argument_of_premisegroup(premisegroup_uid, include_disabled): """ Returns all arguments with the given premisegroup :param premisegroup_uid: PremisgGroup.uid :param include_disabled: Boolean :return: list of Arguments """ db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid) if not include_disabled: db_arguments = db_arguments.filter_by(is_disabled=False) return db_arguments.all() if db_arguments else [] def __get_undercuts_of_argument(argument_uid, include_disabled): """ Returns all undercuts fo the given argument :param argument_uid: Argument.uid :param include_disabled: boolean :return: list of Arguments """ db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument_uid) if not include_disabled: db_undercuts = db_undercuts.filter_by(is_disabled=False) return db_undercuts.all() if db_undercuts else [] def __get_arguments_of_conclusion(statement_uid, 
include_disabled): """ Returns all arguments, where the statement is set as conclusion :param statement_uid: Statement.uid :param include_disabled: Boolean :return: list of arguments """ db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=statement_uid) if not include_disabled: db_arguments = db_arguments.filter_by(is_disabled=False) return db_arguments.all() if db_arguments else [] def get_all_arguments_with_text_by_statement_id(statement_uid): """ Given a statement_uid, it returns all arguments, which use this statement and adds the corresponding text to it, which normally appears in the bubbles. The resulting text depends on the provided language. :param statement_uid: uid to a statement, which should be analyzed :return: list of dictionaries containing some properties of these arguments :rtype: list """ logger('DBAS.LIB', 'main ' + str(statement_uid)) arguments = get_all_arguments_by_statement(statement_uid) results = [] if arguments: results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.uid)} for arg in arguments] return results def get_all_arguments_with_text_and_url_by_statement_id(db_statement, urlmanager, color_statement=False, is_jump=False): """ Given a statement_uid, it returns all arguments, which use this statement and adds the corresponding text to it, which normally appears in the bubbles. The resulting text depends on the provided language. :param db_statement: Statement :param urlmanager: :param color_statement: True, if the statement (specified by the ID) should be colored :return: list of dictionaries containing some properties of these arguments :rtype: list """ logger('DBAS.LIB', 'main ' + str(db_statement.uid)) arguments = get_all_arguments_by_statement(db_statement.uid) uids = [arg.uid for arg in arguments] if arguments else None results = list() sb = '<{} data-argumentation-type="position">'.format(tag_type) if color_statement else '' se = '</{}>'.format(tag_type) if color_statement else '' if not uids: return [] uids.sort() for uid in uids: statement_text = db_statement.get_text() attack_type = 'jump' if is_jump else '' argument_text = get_text_for_argument_uid(uid, anonymous_style=True, attack_type=attack_type) pos = argument_text.lower().find(statement_text.lower()) argument_text = argument_text[:pos] + sb + argument_text[pos:] pos += len(statement_text) + len(sb) argument_text = argument_text[:pos] + se + argument_text[pos:] results.append({ 'uid': uid, 'text': argument_text, 'url': urlmanager.get_url_for_jump(uid) }) return results def get_slug_by_statement_uid(uid): """ Returns slug for the given Issue.uid :param uid: Issue.uid :return: String """ db_statement = DBDiscussionSession.query(Statement).get(uid) return resolve_issue_uid_to_slug(db_statement.issue_uid) def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False, start_with_intro=False, first_arg_by_user=False, user_changed_opinion=False, rearrange_intro=False, colored_position=False, attack_type=None, minimize_on_undercut=False, is_users_opinion=True, anonymous_style=False, support_counter_argument=False): """ Returns current argument as string like "conclusion, because premise1 and premise2" :param uid: Integer :param with_html_tag: Boolean :param start_with_intro: Boolean :param first_arg_by_user: Boolean :param user_changed_opinion: Boolean :param rearrange_intro: Boolean :param colored_position: Boolean :param attack_type: String :param minimize_on_undercut: Boolean :param anonymous_style: Boolean :param support_counter_argument: Boolean :return: 
String """ logger('DBAS.LIB', 'main {}'.format(uid)) db_argument = DBDiscussionSession.query(Argument).get(uid) if not db_argument: return None lang = db_argument.lang _t = Translator(lang) premisegroup_by_user = False author_uid = None db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)).first() if db_user: author_uid = db_user.uid pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.premisegroup_uid) marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by( argument_uid=uid, author_uid=db_user.uid).first() premisegroup_by_user = pgroup.author_uid == db_user.uid or marked_argument is not None # getting all argument id arg_array = [db_argument] while db_argument.argument_uid: db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid) arg_array.append(db_argument) if attack_type == 'jump': return __build_argument_for_jump(arg_array, with_html_tag) if len(arg_array) == 1: # build one argument only return __build_single_argument(arg_array[0], rearrange_intro, with_html_tag, colored_position, attack_type, _t, start_with_intro, is_users_opinion, anonymous_style, support_counter_argument, author_uid) else: # get all pgroups and at last, the conclusion return __build_nested_argument(arg_array, first_arg_by_user, user_changed_opinion, with_html_tag, start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t) def __build_argument_for_jump(arg_array: List[Argument], with_html_tag): """ Build tet for an argument, if we jump to this argument :param arg_array: [Argument] :param with_html_tag: Boolean :return: String """ tag_premise = ('<' + tag_type + ' data-argumentation-type="attack">') if with_html_tag else '' tag_conclusion = ('<' + tag_type + ' data-argumentation-type="argument">') if with_html_tag else '' tag_end = ('</' + tag_type + '>') if with_html_tag else '' lang = arg_array[0].lang _t = Translator(lang) if len(arg_array) == 1: ret_value = __build_val_for_jump(arg_array[0], tag_premise, tag_conclusion, tag_end, _t) elif len(arg_array) == 2: ret_value = __build_val_for_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t) else: ret_value = __build_val_for_undercutted_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t) return ret_value.replace(' ', ' ') def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t): premises = db_argument.get_premisegroup_text() if premises[-1] != '.': premises += '.' 
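    # wrap the conclusion and premises in the given tags and join them with 'because'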
conclusion = db_argument.get_conclusion_text() because = _t.get(_.because).lower() conclusion = tag_conclusion + conclusion + tag_end premises = tag_premise + premises + tag_end intro = (start_con + _t.get(_.isNotRight).lower() + end_tag) if not db_argument.is_supportive else '' ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises) if _t.get_lang() == 'de': intro = _t.get(_.itIsTrueThatAnonymous) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous) intro = intro[0:1].upper() + intro[1:] intro = (start_pro if db_argument.is_supportive else start_con) + intro + end_tag ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises) return ret_value def __build_val_for_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t): db_undercut = arg_array[0] db_conclusion_argument = arg_array[1] premise = db_undercut.get_premisegroup_text() conclusion_premise = db_conclusion_argument.get_premisegroup_text() conclusion_conclusion = db_conclusion_argument.get_conclusion_text() premise = tag_premise + premise + tag_end conclusion_premise = tag_conclusion + conclusion_premise + tag_end conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end intro = (_t.get(_.statementAbout) + ' ') if _t.get_lang() == 'de' else '' bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag because = _t.get(_.because) ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind, conclusion_conclusion, because, premise) return ret_value def __build_val_for_undercutted_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t): premise1 = arg_array[0].get_premisegroup_text() premise2 = arg_array[1].get_premisegroup_text() premise3 = arg_array[2].get_premisegroup_text() conclusion = arg_array[2].get_conclusion_text() bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag because = _t.get(_.because) seperator = ',' if _t.get_lang() == 'de' else '' premise1 = tag_premise + premise1 + tag_end premise2 = tag_conclusion + premise2 + tag_end argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(), premise3) argument = tag_conclusion + argument + tag_end # P2 ist kein guter Grund gegen das Argument, dass C weil P3. Weil P1 ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because, premise1) return ret_value def __build_single_argument(db_argument: Argument, rearrange_intro: bool, with_html_tag: bool, colored_position: bool, attack_type: str, _t: Translator, start_with_intro: bool, is_users_opinion: bool, anonymous_style: bool, support_counter_argument: bool=False, author_uid=None): """ Build up argument text for a single argument Please, do not touch this! 
:param uid: Argument.uid :param rearrange_intro: Boolean :param with_html_tag: Boolean :param colored_position: Boolean :param attack_type: String :param _t: Translator :param start_with_intro: Boolean :param is_users_opinion: Boolean :param anonymous_style: Boolean :param support_counter_argument: Boolean :param author_uid: User.uid :return: String """ premises_text = db_argument.get_premisegroup_text() conclusion_text = db_argument.get_conclusion_text() lang = db_argument.lang if lang != 'de': premises_text = premises_text[0:1].lower() + premises_text[1:] # pretty print premises_text, conclusion_text, sb, sb_none, se = __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises_text, conclusion_text) marked_element = False if author_uid: db_marked = DBDiscussionSession.query(MarkedArgument).filter(MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.author_uid == author_uid).first() marked_element = db_marked is not None you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format('').strip() if lang == 'de': ret_value = __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style, rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang, premises_text, conclusion_text, is_users_opinion, support_counter_argument) else: ret_value = __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion_text, premises_text, db_argument) return ret_value.replace(' ', ' ') def __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises, conclusion): sb_none = start_tag if with_html_tag else '' se = end_tag if with_html_tag else '' if attack_type not in ['dont_know', 'jump']: sb = start_tag if with_html_tag else '' if colored_position: sb = start_position if with_html_tag else '' if attack_type == Relations.UNDERMINE: premises = sb + premises + se else: conclusion = sb + conclusion + se else: sb = start_argument if with_html_tag else '' sb_tmp = start_attack if with_html_tag else '' premises = sb + premises + se conclusion = sb_tmp + conclusion + se return premises, conclusion, sb, sb_none, se def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style, rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang, premises, conclusion, is_users_opinion, support_counter_argument): if start_with_intro and not anonymous_style: intro = _t.get(_.itIsTrueThat) if db_argument.is_supportive else _t.get(_.itIsFalseThat) if rearrange_intro: intro = _t.get(_.itTrueIsThat) if db_argument.is_supportive else _t.get(_.itFalseIsThat) ret_value = (sb_none if attack_type in ['dont_know'] else sb) + intro + se + ' ' elif is_users_opinion and not anonymous_style: ret_value = sb_none if support_counter_argument: ret_value += _t.get(_.youAgreeWithThecounterargument) elif marked_element: ret_value += you_have_the_opinion_that else: ret_value += _t.get(_.youArgue) ret_value += se + ' ' else: tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else _.itIsFalseThatAnonymous) ret_value = sb_none + sb + tmp + se + ' ' ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se) if not db_argument.is_supportive else '' ret_value += conclusion ret_value += ', ' if lang == 'de' else ' ' ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises return ret_value def __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion, 
premises, db_arg): tmp = sb + ' ' + _t.get(_.isNotRight).lower() + se + ', ' + _t.get(_.because).lower() + ' ' ret_value = (you_have_the_opinion_that + ' ' if marked_element else '') + conclusion + ' ' ret_value += _t.get(_.because).lower() if db_arg.is_supportive else tmp ret_value += ' ' + premises return ret_value def __build_nested_argument(arg_array: List[Argument], first_arg_by_user, user_changed_opinion, with_html_tag, start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t): """ :param arg_array: :param first_arg_by_user: :param user_changed_opinion: :param with_html_tag: :param start_with_intro: :param minimize_on_undercut: :param anonymous_style: :param premisegroup_by_user: :param _t: :return: """ # get all pgroups and at last, the conclusion pgroups = [] supportive = [] arg_array = arg_array[::-1] local_lang = arg_array[0].lang # grepping all arguments in the chain for db_argument in arg_array: text = db_argument.get_premisegroup_text() pgroups.append(text) supportive.append(db_argument.is_supportive) conclusion = arg_array[0].get_conclusion_text() # html tags for framing sb = start_position if with_html_tag else '' se = end_tag if with_html_tag else '' because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower() + ' ' if len(arg_array) % 2 is 0 and not first_arg_by_user and not anonymous_style: # system starts ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else _.otherUsersSaidThat) + ' ' tmp_users_opinion = True # user after system elif not anonymous_style: # user starts ret_value = (_t.get(_.soYourOpinionIsThat) + ': ') if start_with_intro else '' tmp_users_opinion = False # system after user conclusion = se + conclusion[0:1].upper() + conclusion[1:] # pretty print else: ret_value = _t.get(_.someoneArgued) + ' ' tmp_users_opinion = False tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else '' ret_value += tmp + conclusion + because + pgroups[0] + '.' del pgroups[0] # just display the last premise group on undercuts, because the story is always saved in all bubbles if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2: return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[len(pgroups) - 1] + se + '.' for i, pgroup in enumerate(pgroups): ret_value += ' ' if tmp_users_opinion and not anonymous_style: tmp = _.butYouCounteredWithArgument if premisegroup_by_user else _.butYouCounteredWithInterest ret_value += _t.get(_.otherParticipantsConvincedYouThat if user_changed_opinion else tmp) elif not anonymous_style: ret_value += _t.get(_.youAgreeWithThatNow) else: ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_.thenOtherUsersSaidThat) ret_value += sb + ' ' + pgroups[i] + '.' 
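        # flip the attribution (user vs. other participants) for the next premise group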
tmp_users_opinion = not tmp_users_opinion return ret_value.replace(' ', ' ') def get_text_for_premisegroup_uid(uid): """ Returns joined text of the premise group and the premise ids :param uid: premisegroup_uid :return: text, uids """ warnings.warn("Use PremiseGroup.get_text() instead.", DeprecationWarning) db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all() if len(db_premises) == 0: return '' texts = [premise.get_text() for premise in db_premises] lang = DBDiscussionSession.query(Statement).get(db_premises[0].statements.uid).lang _t = Translator(lang) return ' {} '.format(_t.get(_.aand)).join(texts) def get_text_for_statement_uid(uid: int, colored_position=False): """ Returns text of statement with given uid :param uid: Statement.uid :param colored_position: Boolean :return: String """ warnings.warn("Use Statement.get_text() or Statement.get_html() instead.", DeprecationWarning) if not isinstance(uid, int): return None db_statement = DBDiscussionSession.query(Statement).get(uid) if not db_statement: return None db_textversion = DBDiscussionSession.query(TextVersion).order_by(TextVersion.uid.desc()).get( db_statement.textversion_uid) content = db_textversion.content while content.endswith(('.', '?', '!')): content = content[:-1] sb, se = '', '' if colored_position: sb = '<{} data-argumentation-type="position">'.format(tag_type) se = '</{}>'.format(tag_type) return sb + content + se def get_text_for_premise(uid: int, colored_position: bool = False): """ Returns text of premise with given uid :param uid: Statement.uid :param colored_position: Boolean :return: String """ db_premise = DBDiscussionSession.query(Premise).get(uid) if db_premise: return db_premise.get_text(html=colored_position) else: return None def get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True): """ Check the arguments conclusion whether it is an statement or an argument and returns the text :param argument: Argument :param start_with_intro: Boolean :param rearrange_intro: Boolean :return: String """ if argument.argument_uid: return get_text_for_argument_uid(argument.argument_uid, start_with_intro, rearrange_intro=rearrange_intro, is_users_opinion=is_users_opinion) else: return argument.get_conclusion_text() def resolve_issue_uid_to_slug(uid): """ Given the issue uid query database and return the correct slug of the issue. 
:param uid: issue_uid :type uid: int :return: Slug of issue :rtype: str """ issue = DBDiscussionSession.query(Issue).get(uid) return issue.slug if issue else None def get_all_attacking_arg_uids_from_history(history): """ Returns all arguments of the history, which attacked the user :param history: String :return: [Arguments.uid] :rtype: list """ try: splitted_history = history.split('-') uids = [] for part in splitted_history: if 'reaction' in part: parts = part.split('/') pos = parts.index('reaction') uids.append(part.split('/')[pos + 3]) return uids except AttributeError: return [] def get_user_by_private_or_public_nickname(nickname): """ Gets the user by his (public) nickname, based on the option, whether his nickname is public or not :param nickname: Nickname of the user :return: Current user or None """ db_user = get_user_by_case_insensitive_nickname(nickname) db_public_user = get_user_by_case_insensitive_public_nickname(nickname) uid = 0 if db_user: uid = db_user.uid elif db_public_user: uid = db_public_user.uid db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid).first() if not db_settings: return None if db_settings.should_show_public_nickname and db_user: return db_user elif not db_settings.should_show_public_nickname and db_public_user: return db_public_user return None def get_user_by_case_insensitive_nickname(nickname): """ Returns user with given nickname :param nickname: String :return: User or None """ return DBDiscussionSession.query(User).filter(func.lower(User.nickname) == func.lower(nickname)).first() def get_user_by_case_insensitive_public_nickname(public_nickname): """ Returns user with given public nickname :param public_nickname: String :return: User or None """ return DBDiscussionSession.query(User).filter( func.lower(User.public_nickname) == func.lower(public_nickname)).first() def pretty_print_options(message): """ Some modifications for pretty printing. Use uppercase for first letter in text and a single dot for the end if there isn't one already. :param message: String :return: String """ # check for html if message[0:1] == '<': pos = message.index('>') message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:] else: message = message[0:1].upper() + message[1:] # check for html if message[-1] == '>': pos = message.rfind('<') if message[pos - 1:pos] not in ['.', '?', '!']: message = message[0:pos] + '.' + message[pos:] elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now': message += '.' return message def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=False, is_author: bool=False, uid: str='', bubble_url: str= '', content: str= '', omit_bubble_url: bool=False, omit_vote_info: bool=False, argument_uid: int=None, statement_uid: int=None, is_supportive: bool=False, nickname: str='anonymous', lang: str='en', is_users_opinion: bool=False, other_author: User=None): """ Creates an dictionary which includes every information needed for a bubble. 
:param bubble_type: BubbleTypes :param is_markable: True if the content itself could be flagged :param is_author: True if the current user is author of the content :param uid: Identifier for the bubble :param bubble_url: URL for the click event of the bubble :param content: Text of the bubble :param omit_bubble_url: True if the bubble should have a link :param omit_vote_info: True if the bubble have the little, grey information text :param argument_uid: Argument.uid :param statement_uid: Statement.uid :param is_supportive: Boolean :param nickname: String :param omit_bubble_url: Boolean :param lang: is_users_opinion :param is_users_opinion: Boolean :return: dict() """ gravatar_link = get_global_url() + '/static/images/icon.png' profile = None if uid is not 'now': content = pretty_print_options(content) if bubble_type is BubbleTypes.SYSTEM and other_author is not None: gravatar_link = get_profile_picture(other_author, 25) profile = '/user/{}'.format(other_author.uid), # check for users opinion if bubble_type is BubbleTypes.USER and nickname != 'anonymous': db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first() db_marked = None gravatar_link = get_profile_picture(db_user, 25) if argument_uid is not None and db_user is not None: db_marked = DBDiscussionSession.query(MarkedArgument).filter( MarkedArgument.argument_uid == argument_uid, MarkedArgument.author_uid == db_user.uid).first() if statement_uid is not None and db_user is not None: db_marked = DBDiscussionSession.query(MarkedStatement).filter( MarkedStatement.statement_uid == statement_uid, MarkedStatement.author_uid == db_user.uid).first() is_users_opinion = db_marked is not None speech = { 'is_user': bubble_type is BubbleTypes.USER, 'is_system': bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO, 'is_markable': is_markable, 'is_author': is_author, 'id': uid if len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url, 'message': content, 'omit_bubble_url': omit_bubble_url, 'omit_vote_info': omit_vote_info, 'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None', 'data_argument_uid': argument_uid, 'data_statement_uid': statement_uid, 'data_is_supportive': is_supportive, 'is_users_opinion': is_users_opinion, 'enemy': { 'avatar': gravatar_link, 'profile': profile, 'available': profile is not None } } votecount_keys = __get_text_for_click_and_mark_count(nickname, bubble_type is BubbleTypes.USER, argument_uid, statement_uid, speech, lang) speech['votecounts_message'] = votecount_keys[speech['votecounts']] return speech def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid, statement_uid, speech, lang): """ Build text for a bubble, how many other participants have the same interest? 
    :param nickname: User.nickname
    :param is_user: boolean
    :param argument_uid: Argument.uid
    :param statement_uid: Statement.uid
    :param speech: dict()
    :param lang: ui_locales
    :return: [String]
    """
    if not nickname:
        nickname = 'anonymous'

    db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()
    if not db_user:
        db_user = DBDiscussionSession.query(User).filter_by(nickname='anonymous').first()

    db_clicks, db_marks = __get_clicks_and_marks(argument_uid, statement_uid, db_user)

    _t = Translator(lang)
    speech['votecounts'] = len(db_clicks) if db_clicks else 0
    if db_marks:
        speech['votecounts'] += len(db_marks)

    votecount_keys = defaultdict(lambda: "{} {}.".format(speech['votecounts'], _t.get(_.voteCountTextMore)))

    if is_user and db_user.gender == 'm':
        gender_key = _.voteCountTextFirstM
    elif is_user and db_user.gender == 'f':
        gender_key = _.voteCountTextFirstF
    else:
        gender_key = _.voteCountTextFirst

    votecount_keys[0] = '{}.'.format(_t.get(gender_key))
    votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'

    return votecount_keys


def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
    db_clicks = None
    db_marks = None
    if argument_uid:
        db_clicks = DBDiscussionSession.query(ClickedArgument). \
            filter(ClickedArgument.argument_uid == argument_uid,
                   ClickedArgument.is_up_vote == True,
                   ClickedArgument.is_valid,
                   ClickedArgument.author_uid != db_user.uid).all()
        db_marks = DBDiscussionSession.query(MarkedArgument). \
            filter(MarkedArgument.argument_uid == argument_uid,
                   MarkedArgument.author_uid != db_user.uid).all()

    elif statement_uid:
        db_clicks = DBDiscussionSession.query(ClickedStatement). \
            filter(ClickedStatement.statement_uid == statement_uid,
                   ClickedStatement.is_up_vote == True,
                   ClickedStatement.is_valid,
                   ClickedStatement.author_uid != db_user.uid).all()
        db_marks = DBDiscussionSession.query(MarkedStatement). \
            filter(MarkedStatement.statement_uid == statement_uid,
                   MarkedStatement.author_uid != db_user.uid).all()

    return db_clicks, db_marks


def is_argument_disabled_due_to_disabled_statements(argument):
    """
    Returns true if any involved statement is disabled.

    :param argument: Argument
    :return: Boolean
    """
    if argument.conclusion_uid is None:
        # check conclusion of the given argument's conclusion
        db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)
        conclusion = DBDiscussionSession.query(Statement).get(db_argument.conclusion_uid)
        if conclusion.is_disabled:
            return True
        # check premisegroup of the given argument's conclusion
        premises = __get_all_premises_of_argument(db_argument)
        for premise in premises:
            if premise.statements.is_disabled:
                return True
    else:
        # check conclusion of the given argument
        conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)
        if conclusion.is_disabled:
            return True
        # check premisegroup of the given argument
        premises = __get_all_premises_of_argument(argument)
        for premise in premises:
            if premise.statements.is_disabled:
                return True
    return False


def is_author_of_statement(db_user: User, statement_uid: int) -> bool:
    """
    Is the user with given nickname author of the statement?
:param db_user: User :param statement_uid: Statement.uid :return: Boolean """ db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None if not db_user: return False db_textversion = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=statement_uid).order_by( TextVersion.uid.asc()).first() # TODO #432 if not db_textversion: return False return db_textversion.author_uid == db_user.uid def is_author_of_argument(db_user: User, argument_uid: int) -> bool: """ Is the user with given nickname author of the argument? :param db_user: User :param argument_uid: Argument.uid :return: Boolean """ db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None if not db_user: return False db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid == argument_uid, Argument.author_uid == db_user.uid).first() return True if db_argument else False def __get_all_premises_of_argument(argument): """ Returns list with all premises of the argument. :param argument: Argument :return: list() """ ret_list = [] db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=argument.premisegroup_uid).join( Statement).all() for premise in db_premises: ret_list.append(premise) return ret_list def get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False): """ Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px :param user: User :param size: Integer, default 80 :param ignore_privacy_settings: :return: String """ additional_id = '' if user and isinstance(user, User): additional_id = '' if user.settings.should_show_public_nickname or ignore_privacy_settings else 'x' return __get_gravatar(user, additional_id, size) def get_public_profile_picture(user: User, size: int = 80): """ Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px If the user doesn't want an public profile, an anonymous image will be returned :param user: User :param size: Integer, default 80 :return: String """ additional_id = '' if user.settings.should_show_public_nickname: additional_id = 'x' if len(str(user.oauth_provider)) > 0: additional_id = '{}{}'.format(user.oauth_provider, user.oauth_provider_id) return __get_gravatar(user, additional_id, size) def __get_gravatar(user, additional_id, size): if user: if str(user.email) == 'None': email = (user.nickname + additional_id).encode('utf-8') else: email = (user.email + additional_id).encode('utf-8') else: email = 'unknown'.encode('utf-8') gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.md5(email.lower()).hexdigest()) gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)}) return gravatar_url def get_author_data(uid, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20): """ Returns a-tag with gravatar of current author and users page as href :param uid: Uid of the author :param gravatar_on_right_side: True, if the gravatar is on the right of authors name :param linked_with_users_page: True, if the text is a link to the authors site :param profile_picture_size: Integer :return: HTML-String """ db_user = DBDiscussionSession.query(User).get(int(uid)) if not db_user: return None, 'Missing author with uid ' + str(uid), False nick = db_user.global_nickname img_src = get_profile_picture(db_user, profile_picture_size) link_begin = '' link_end = '' if linked_with_users_page: link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick) 
link_end = '</a>' side = 'left' if gravatar_on_right_side else 'right' img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(img_src, side) if gravatar_on_right_side: return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end), True else: return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end), True def bubbles_already_last_in_list(bubble_list, bubbles): """ Are the given bubbles already at the end of the bubble list :param bubble_list: list of Bubbles :param bubbles: list of bubbles :return: Boolean """ if isinstance(bubbles, list): length = len(bubbles) else: length = 1 bubbles = [bubbles] if len(bubble_list) < length: return False for bubble in bubbles: if 'message' not in bubble: return False start_index = - length is_already_in = False for bubble in bubbles: last = bubble_list[start_index] if 'message' not in last or 'message' not in bubble: return False text1 = unhtmlify(last['message'].lower()).strip() text2 = unhtmlify(bubble['message'].lower()).strip() is_already_in = is_already_in or (text1 == text2) start_index += 1 return is_already_in def unhtmlify(html): """ Remove html-tags and unescape encoded html-entities. :param html: Evil-string containing html :return: """ return unescape(re.sub(r'<.*?>', '', html))
normal
{ "blob_id": "10a9437453371bd7472e93af1026c778b7983cf8", "index": 1137, "step-1": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\n<mask token>\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\n<mask token>\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. 
The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\n<mask token>\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if 
_t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: 
Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\n<mask token>\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = 
DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\n<mask token>\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side 
else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\n<mask token>\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\n<mask token>\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. 
The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\n<mask token>\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if 
_t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the 
premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\n<mask token>\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n 
if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\n<mask token>\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + 
additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.\n rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n return changelog[0:no]\n\n\n<mask token>\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n 
:return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,\n include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid,\n include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=\n statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.\n premisegroup_uid, include_disabled)\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,\n include_disabled)\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in\n arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(\n premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = 
DBDiscussionSession.query(Argument).filter_by(conclusion_uid\n =statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement,\n urlmanager, color_statement=False, is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type\n ) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n if not uids:\n return []\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True,\n attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n results.append({'uid': uid, 'text': argument_text, 'url':\n urlmanager.get_url_for_jump(uid)})\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n 
:return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument],\n tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),\n premise3)\n argument = tag_conclusion + argument + tag_end\n ret_value = '{} {} {}. 
{} {}'.format(premise2, bind, argument, because,\n premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool,\n with_html_tag: bool, colored_position: bool, attack_type: str, _t:\n Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid\n =None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:]\n premises_text, conclusion_text, sb, sb_none, se = (\n __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises_text, conclusion_text))\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.\n author_uid == author_uid).first()\n marked_element = db_marked is not None\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''\n ).strip()\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se,\n you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion, support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se,\n you_have_the_opinion_that, marked_element, conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,\n start_with_intro, anonymous_style, rearrange_intro, db_argument,\n attack_type, sb_none, marked_element, lang, premises, conclusion,\n is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat\n ) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n ret_value = (sb_none if attack_type in ['dont_know'] else sb\n ) + intro + se + ' '\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif 
marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else\n _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se\n ) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask 
token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.\n public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(\n ) + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' 
+ message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=\n False, is_author: bool=False, uid: str='', bubble_url: str='', content:\n str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=\n False, nickname: str='anonymous', lang: str='en', is_users_opinion:\n bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n if uid is not 'now':\n content = pretty_print_options(content)\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument\n .author_uid == db_user.uid).first()\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, \n MarkedStatement.author_uid == db_user.uid).first()\n is_users_opinion = db_marked is not None\n speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system': \n bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is\n BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,\n 'is_markable': is_markable, 'is_author': is_author, 'id': uid if \n len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,\n 'message': content, 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info, 'data_type': 'argument' if\n argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid, 'data_statement_uid':\n statement_uid, 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion, 'enemy': {'avatar':\n gravatar_link, 'profile': profile, 'available': profile is not None}}\n votecount_keys = __get_text_for_click_and_mark_count(nickname, \n bubble_type is BubbleTypes.USER, argument_uid, statement_uid,\n speech, lang)\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param 
nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == 
db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==\n argument_uid, Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int=80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.\n oauth_provider_id)\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return 
str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.\n rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n return changelog[0:no]\n\n\ndef is_development_mode(registry):\n \"\"\"\n Returns true, if mode is set to development in current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False\n\n\ndef usage_of_modern_bubbles(registry):\n \"\"\"\n Returns true, if modern bubbles are set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'modern_bubbles' in registry.settings:\n return registry.settings['modern_bubbles'].lower() == 'true'\n return False\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. 
%b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,\n include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid,\n include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=\n statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.\n premisegroup_uid, include_disabled)\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,\n include_disabled)\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in\n arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(\n premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid\n =statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. 
The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement,\n urlmanager, color_statement=False, is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type\n ) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n if not uids:\n return []\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True,\n attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n results.append({'uid': uid, 'text': argument_text, 'url':\n urlmanager.get_url_for_jump(uid)})\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = 
DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\ndef __build_argument_for_jump(arg_array: List[Argument], with_html_tag):\n \"\"\"\n Build tet for an argument, if we jump to this argument\n\n :param arg_array: [Argument]\n :param with_html_tag: Boolean\n :return: String\n \"\"\"\n tag_premise = ('<' + tag_type + ' data-argumentation-type=\"attack\">' if\n with_html_tag else '')\n tag_conclusion = ('<' + tag_type +\n ' data-argumentation-type=\"argument\">' if with_html_tag else '')\n tag_end = '</' + tag_type + '>' if with_html_tag else ''\n lang = arg_array[0].lang\n _t = Translator(lang)\n if len(arg_array) == 1:\n ret_value = __build_val_for_jump(arg_array[0], tag_premise,\n tag_conclusion, tag_end, _t)\n elif len(arg_array) == 2:\n ret_value = __build_val_for_undercut(arg_array, tag_premise,\n tag_conclusion, tag_end, _t)\n else:\n ret_value = __build_val_for_undercutted_undercut(arg_array,\n tag_premise, tag_conclusion, tag_end, _t)\n return ret_value.replace(' ', ' ')\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\ndef __build_val_for_undercut(arg_array: List[Argument], tag_premise,\n tag_conclusion, tag_end, _t):\n db_undercut = arg_array[0]\n db_conclusion_argument = arg_array[1]\n premise = db_undercut.get_premisegroup_text()\n conclusion_premise = db_conclusion_argument.get_premisegroup_text()\n conclusion_conclusion = db_conclusion_argument.get_conclusion_text()\n premise = tag_premise + premise + tag_end\n conclusion_premise = tag_conclusion + conclusion_premise + tag_end\n conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end\n intro = _t.get(_.statementAbout) + ' ' if _t.get_lang() == 'de' else ''\n bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag\n because = 
_t.get(_.because)\n ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind,\n conclusion_conclusion, because, premise)\n return ret_value\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument],\n tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),\n premise3)\n argument = tag_conclusion + argument + tag_end\n ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,\n premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool,\n with_html_tag: bool, colored_position: bool, attack_type: str, _t:\n Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid\n =None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:]\n premises_text, conclusion_text, sb, sb_none, se = (\n __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises_text, conclusion_text))\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.\n author_uid == author_uid).first()\n marked_element = db_marked is not None\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''\n ).strip()\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se,\n you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion, support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se,\n you_have_the_opinion_that, marked_element, conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + 
premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,\n start_with_intro, anonymous_style, rearrange_intro, db_argument,\n attack_type, sb_none, marked_element, lang, premises, conclusion,\n is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat\n ) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n ret_value = (sb_none if attack_type in ['dont_know'] else sb\n ) + intro + se + ' '\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else\n _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se\n ) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += 
_t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\ndef get_all_attacking_arg_uids_from_history(history):\n \"\"\"\n Returns all arguments of the history, which attacked the user\n\n :param history: String\n :return: [Arguments.uid]\n :rtype: list\n \"\"\"\n try:\n splitted_history = history.split('-')\n uids = []\n for part in splitted_history:\n if 'reaction' in part:\n parts = part.split('/')\n pos = parts.index('reaction')\n uids.append(part.split('/')[pos + 3])\n return uids\n except AttributeError:\n return []\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: 
User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.\n public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(\n ) + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' + message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=\n False, is_author: bool=False, uid: str='', bubble_url: str='', content:\n str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=\n False, nickname: str='anonymous', lang: str='en', is_users_opinion:\n bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n if uid is not 'now':\n content = pretty_print_options(content)\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument\n .author_uid == db_user.uid).first()\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, \n MarkedStatement.author_uid == db_user.uid).first()\n is_users_opinion = db_marked is not None\n speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system': \n bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is\n BubbleTypes.STATUS, 'is_info': bubble_type 
is BubbleTypes.INFO,\n 'is_markable': is_markable, 'is_author': is_author, 'id': uid if \n len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,\n 'message': content, 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info, 'data_type': 'argument' if\n argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid, 'data_statement_uid':\n statement_uid, 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion, 'enemy': {'avatar':\n gravatar_link, 'profile': profile, 'available': profile is not None}}\n votecount_keys = __get_text_for_click_and_mark_count(nickname, \n bubble_type is BubbleTypes.USER, argument_uid, statement_uid,\n speech, lang)\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = 
__get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==\n argument_uid, Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\ndef __get_all_premises_of_argument(argument):\n \"\"\"\n Returns list with all premises of the argument.\n\n :param argument: Argument\n :return: list()\n \"\"\"\n ret_list = []\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =argument.premisegroup_uid).join(Statement).all()\n for premise in db_premises:\n ret_list.append(premise)\n return ret_list\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int=80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.\n oauth_provider_id)\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef 
get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\ndef bubbles_already_last_in_list(bubble_list, bubbles):\n \"\"\"\n Are the given bubbles already at the end of the bubble list\n\n :param bubble_list: list of Bubbles\n :param bubbles: list of bubbles\n :return: Boolean\n \"\"\"\n if isinstance(bubbles, list):\n length = len(bubbles)\n else:\n length = 1\n bubbles = [bubbles]\n if len(bubble_list) < length:\n return False\n for bubble in bubbles:\n if 'message' not in bubble:\n return False\n start_index = -length\n is_already_in = False\n for bubble in bubbles:\n last = bubble_list[start_index]\n if 'message' not in last or 'message' not in bubble:\n return False\n text1 = unhtmlify(last['message'].lower()).strip()\n text2 = unhtmlify(bubble['message'].lower()).strip()\n is_already_in = is_already_in or text1 == text2\n start_index += 1\n return is_already_in\n\n\ndef unhtmlify(html):\n \"\"\"\n Remove html-tags and unescape encoded html-entities.\n\n :param html: Evil-string containing html\n :return:\n \"\"\"\n return unescape(re.sub('<.*?>', '', html))\n", "step-5": "\"\"\"\nCommon, pure functions used by the D-BAS.\n\n\n.. 
codeauthor:: Tobias Krauthoff <[email protected]\n\"\"\"\nimport hashlib\nimport locale\nimport os\nimport re\nimport warnings\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom enum import Enum, auto\nfrom html import escape, unescape\nfrom typing import List\nfrom urllib import parse\nfrom uuid import uuid4\n\nfrom sqlalchemy import func\n\nfrom dbas.database import DBDiscussionSession\nfrom dbas.database.discussion_model import Argument, Premise, Statement, TextVersion, Issue, User, Settings, \\\n ClickedArgument, ClickedStatement, MarkedArgument, MarkedStatement, PremiseGroup\nfrom dbas.logger import logger\nfrom dbas.strings.keywords import Keywords as _\nfrom dbas.strings.translator import Translator\n\nnick_of_anonymous_user = 'anonymous'\n\nfallback_lang = 'en'\ntag_type = 'span'\nstart_attack = '<{} data-argumentation-type=\"attack\">'.format(tag_type)\nstart_argument = '<{} data-argumentation-type=\"argument\">'.format(tag_type)\nstart_position = '<{} data-argumentation-type=\"position\">'.format(tag_type)\nstart_content = '<{} class=\"triangle-content-text\">'.format(tag_type)\nstart_pro = '<{} data-attitude=\"pro\">'.format(tag_type)\nstart_con = '<{} data-attitude=\"con\">'.format(tag_type)\nstart_tag = '<{}>'.format(tag_type)\nend_tag = '</{}>'.format(tag_type)\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\nrelation_mapper = {relation.value: relation for relation in Relations}\nattitude_mapper = {attitude.value: attitude for attitude in Attitudes}\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]\n\n\ndef is_development_mode(registry):\n \"\"\"\n Returns true, if mode is set to development in current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False\n\n\ndef usage_of_modern_bubbles(registry):\n \"\"\"\n Returns true, if modern bubbles are set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'modern_bubbles' in registry.settings:\n return registry.settings['modern_bubbles'].lower() == 'true'\n return False\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef 
escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False,\n Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n\n # first matchdict, then params, then session, afterwards fallback\n issue = matchdict['issue'] if 'issue' in matchdict \\\n else params['issue'] if 'issue' in params \\\n else session['issue'] if 'issue' in session \\\n else current_issue_uid\n\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid, include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid, include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.premisegroup_uid, include_disabled)\n\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n 
:return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement, urlmanager, color_statement=False,\n is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n\n if not uids:\n return []\n\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True, attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n\n results.append({\n 'uid': uid,\n 'text': argument_text,\n 'url': urlmanager.get_url_for_jump(uid)\n })\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False, start_with_intro=False, first_arg_by_user=False,\n user_changed_opinion=False, rearrange_intro=False, colored_position=False,\n attack_type=None, minimize_on_undercut=False, 
is_users_opinion=True,\n anonymous_style=False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)).first()\n\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid,\n author_uid=db_user.uid).first()\n premisegroup_by_user = pgroup.author_uid == db_user.uid or marked_argument is not None\n\n # getting all argument id\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid)\n arg_array.append(db_argument)\n\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n\n if len(arg_array) == 1:\n # build one argument only\n return __build_single_argument(arg_array[0], rearrange_intro, with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style, support_counter_argument,\n author_uid)\n\n else:\n # get all pgroups and at last, the conclusion\n return __build_nested_argument(arg_array, first_arg_by_user, user_changed_opinion, with_html_tag,\n start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user,\n _t)\n\n\ndef __build_argument_for_jump(arg_array: List[Argument], with_html_tag):\n \"\"\"\n Build tet for an argument, if we jump to this argument\n\n :param arg_array: [Argument]\n :param with_html_tag: Boolean\n :return: String\n \"\"\"\n tag_premise = ('<' + tag_type + ' data-argumentation-type=\"attack\">') if with_html_tag else ''\n tag_conclusion = ('<' + tag_type + ' data-argumentation-type=\"argument\">') if with_html_tag else ''\n tag_end = ('</' + tag_type + '>') if with_html_tag else ''\n lang = arg_array[0].lang\n _t = Translator(lang)\n\n if len(arg_array) == 1:\n ret_value = __build_val_for_jump(arg_array[0], tag_premise, tag_conclusion, tag_end, _t)\n\n elif len(arg_array) == 2:\n ret_value = __build_val_for_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)\n\n else:\n ret_value = __build_val_for_undercutted_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)\n\n return ret_value.replace(' ', ' ')\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n\n intro = (start_con + _t.get(_.isNotRight).lower() + end_tag) if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, 
premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous)\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n\n return ret_value\n\n\ndef __build_val_for_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):\n db_undercut = arg_array[0]\n db_conclusion_argument = arg_array[1]\n premise = db_undercut.get_premisegroup_text()\n conclusion_premise = db_conclusion_argument.get_premisegroup_text()\n conclusion_conclusion = db_conclusion_argument.get_conclusion_text()\n\n premise = tag_premise + premise + tag_end\n conclusion_premise = tag_conclusion + conclusion_premise + tag_end\n conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end\n\n intro = (_t.get(_.statementAbout) + ' ') if _t.get_lang() == 'de' else ''\n bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag\n because = _t.get(_.because)\n ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind, conclusion_conclusion, because, premise)\n\n return ret_value\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(), premise3)\n argument = tag_conclusion + argument + tag_end\n\n # P2 ist kein guter Grund gegen das Argument, dass C weil P3. Weil P1\n ret_value = '{} {} {}. 
{} {}'.format(premise2, bind, argument, because, premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool, with_html_tag: bool, colored_position: bool,\n attack_type: str, _t: Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid=None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:] # pretty print\n\n premises_text, conclusion_text, sb, sb_none, se = __get_tags_for_building_single_argument(with_html_tag,\n attack_type,\n colored_position,\n premises_text,\n conclusion_text)\n\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(MarkedArgument.argument_uid == db_argument.uid,\n MarkedArgument.author_uid == author_uid).first()\n marked_element = db_marked is not None\n\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format('').strip()\n\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro,\n anonymous_style, rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion,\n support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element,\n conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang,\n premises, conclusion, is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n\n ret_value = (sb_none if attack_type in ['dont_know'] else sb) + intro + se + ' '\n\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif 
marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\ndef __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion, premises, db_arg):\n tmp = sb + ' ' + _t.get(_.isNotRight).lower() + se + ', ' + _t.get(_.because).lower() + ' '\n ret_value = (you_have_the_opinion_that + ' ' if marked_element else '') + conclusion + ' '\n ret_value += _t.get(_.because).lower() if db_arg.is_supportive else tmp\n ret_value += ' ' + premises\n return ret_value\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user, user_changed_opinion, with_html_tag,\n start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n # get all pgroups and at last, the conclusion\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n\n # grepping all arguments in the chain\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n\n conclusion = arg_array[0].get_conclusion_text()\n\n # html tags for framing\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower() + ' '\n\n if len(arg_array) % 2 is 0 and not first_arg_by_user and not anonymous_style: # system starts\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True # user after system\n\n elif not anonymous_style: # user starts\n ret_value = (_t.get(_.soYourOpinionIsThat) + ': ') if start_with_intro else ''\n tmp_users_opinion = False # system after user\n conclusion = se + conclusion[0:1].upper() + conclusion[1:] # pretty print\n\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n\n # just display the last premise group on undercuts, because the story is always saved in all bubbles\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[len(pgroups) - 1] + se + '.'\n\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = _.butYouCounteredWithArgument if premisegroup_by_user else _.butYouCounteredWithInterest\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_.thenOtherUsersSaidThat)\n\n ret_value += sb + ' ' + pgroups[i] + '.'\n 
tmp_users_opinion = not tmp_users_opinion\n\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn(\"Use PremiseGroup.get_text() instead.\", DeprecationWarning)\n\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].statements.uid).lang\n _t = Translator(lang)\n\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\ndef get_text_for_statement_uid(uid: int, colored_position=False):\n \"\"\"\n Returns text of statement with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n warnings.warn(\"Use Statement.get_text() or Statement.get_html() instead.\", DeprecationWarning)\n\n if not isinstance(uid, int):\n return None\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n if not db_statement:\n return None\n\n db_textversion = DBDiscussionSession.query(TextVersion).order_by(TextVersion.uid.desc()).get(\n db_statement.textversion_uid)\n content = db_textversion.content\n\n while content.endswith(('.', '?', '!')):\n content = content[:-1]\n\n sb, se = '', ''\n if colored_position:\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type)\n se = '</{}>'.format(tag_type)\n\n return sb + content + se\n\n\ndef get_text_for_premise(uid: int, colored_position: bool = False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid, start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\ndef get_all_attacking_arg_uids_from_history(history):\n \"\"\"\n Returns all arguments of the history, which attacked the user\n\n :param history: String\n :return: [Arguments.uid]\n :rtype: list\n \"\"\"\n try:\n splitted_history = history.split('-')\n uids = []\n for part in splitted_history:\n if 'reaction' in part:\n parts = part.split('/')\n pos = parts.index('reaction')\n uids.append(part.split('/')[pos + 3])\n return uids\n except AttributeError:\n return []\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n 
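    # Lookup order (descriptive note for the statements below): the name is resolved both as a
    # private and as a public nickname, the matching Settings row is loaded, and its
    # should_show_public_nickname flag decides which of the two User records may be returned.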
db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid).first()\n\n if not db_settings:\n return None\n\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) == func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(\n func.lower(User.public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n\n # check for html\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n\n # check for html\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' + message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=False, is_author: bool=False, uid: str='',\n bubble_url: str= '', content: str= '', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=False,\n nickname: str='anonymous', lang: str='en', is_users_opinion: bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n\n if uid is not 'now':\n content = pretty_print_options(content)\n\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n\n # check for users opinion\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = 
DBDiscussionSession.query(User).filter_by(nickname=nickname).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid,\n MarkedArgument.author_uid == db_user.uid).first()\n\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid,\n MarkedStatement.author_uid == db_user.uid).first()\n\n is_users_opinion = db_marked is not None\n\n speech = {\n 'is_user': bubble_type is BubbleTypes.USER,\n 'is_system': bubble_type is BubbleTypes.SYSTEM,\n 'is_status': bubble_type is BubbleTypes.STATUS,\n 'is_info': bubble_type is BubbleTypes.INFO,\n 'is_markable': is_markable,\n 'is_author': is_author,\n 'id': uid if len(str(uid)) > 0 else uuid4().hex,\n 'bubble_url': bubble_url,\n 'message': content,\n 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info,\n 'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid,\n 'data_statement_uid': statement_uid,\n 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion,\n 'enemy': {\n 'avatar': gravatar_link,\n 'profile': profile,\n 'available': profile is not None\n }\n }\n\n votecount_keys = __get_text_for_click_and_mark_count(nickname, bubble_type is BubbleTypes.USER, argument_uid,\n statement_uid, speech, lang)\n\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid, statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n\n if not nickname:\n nickname = 'anonymous'\n\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname='anonymous').first()\n\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid, statement_uid, db_user)\n\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n\n votecount_keys = defaultdict(lambda: \"{} {}.\".format(speech['votecounts'], _t.get(_.voteCountTextMore)))\n\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument). \\\n filter(ClickedArgument.argument_uid == argument_uid,\n ClickedArgument.is_up_vote == True,\n ClickedArgument.is_valid,\n ClickedArgument.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument). 
\\\n filter(MarkedArgument.argument_uid == argument_uid,\n MarkedArgument.author_uid != db_user.uid).all()\n\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement). \\\n filter(ClickedStatement.statement_uid == statement_uid,\n ClickedStatement.is_up_vote == True,\n ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement). \\\n filter(MarkedStatement.statement_uid == statement_uid,\n MarkedStatement.author_uid != db_user.uid).all()\n\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n # check conclusion of given arguments conclusion\n db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.conclusion_uid)\n if conclusion.is_disabled:\n return True\n # check premisegroup of given arguments conclusion\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n # check conclusion of given argument\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)\n if conclusion.is_disabled:\n return True\n\n # check premisegroup of given argument\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) -> bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None\n if not db_user:\n return False\n\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=statement_uid).order_by(\n TextVersion.uid.asc()).first() # TODO #432\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) -> bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid == argument_uid,\n Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\ndef __get_all_premises_of_argument(argument):\n \"\"\"\n Returns list with all premises of the argument.\n\n :param argument: Argument\n :return: list()\n \"\"\"\n ret_list = []\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=argument.premisegroup_uid).join(\n Statement).all()\n for premise in db_premises:\n ret_list.append(premise)\n return ret_list\n\n\ndef get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = '' if 
user.settings.should_show_public_nickname or ignore_privacy_settings else 'x'\n\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int = 80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.oauth_provider_id)\n\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(img_src, side)\n\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end), True\n\n\ndef bubbles_already_last_in_list(bubble_list, bubbles):\n \"\"\"\n Are the given bubbles already at the end of the bubble list\n\n :param bubble_list: list of Bubbles\n :param bubbles: list of bubbles\n :return: Boolean\n \"\"\"\n if isinstance(bubbles, list):\n length = len(bubbles)\n else:\n length = 1\n bubbles = [bubbles]\n\n if len(bubble_list) < length:\n return False\n\n for bubble in bubbles:\n if 'message' not in bubble:\n return False\n\n start_index = - length\n is_already_in = False\n for bubble in bubbles:\n\n last = bubble_list[start_index]\n if 'message' not in last or 'message' not in bubble:\n return False\n\n text1 = unhtmlify(last['message'].lower()).strip()\n text2 = unhtmlify(bubble['message'].lower()).strip()\n is_already_in = is_already_in or (text1 == text2)\n start_index += 1\n\n return is_already_in\n\n\ndef unhtmlify(html):\n \"\"\"\n Remove html-tags and unescape encoded html-entities.\n\n :param html: Evil-string containing html\n :return:\n \"\"\"\n return unescape(re.sub(r'<.*?>', '', html))\n", "step-ids": [ 29, 31, 47, 55, 60 ] }
[ 29, 31, 47, 55, 60 ]
from math import exp from math import e import numpy as np import decimal import pandas as pd pop = [] x = 0 for a in range(1,10001): pop.append((1.2)*e**(-1.2*x)) x =+0.0001 for k in range(100,10100,100): exec(f'S{k} =pop[1:k]') #################################################################################### import numpy as np for size in np.arange(100,10100,100): exec(f'S{size} = np.random.exponential(scale=1.2,size=size)') len(S10000) #################################################################################### import numpy as np #another way to do it #create a dictionary of samples dict_samples = {} for size in np.arange(100,10100,100): dict_samples[size]=np.random.exponential(scale=10/12,size=size) dict_samples[100] len(dict_samples[200]) 1/1.2 pos = 100 for pos in np.arange(100,10100,100): sample = dict_samples[pos] sample_mean = sample.mean() print("The mean for sample {} is {}".format(pos,sample_mean))
normal
{ "blob_id": "adfdd988b7e208229f195308df8d63fd2799046f", "index": 8941, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor a in range(1, 10001):\n pop.append(1.2 * e ** (-1.2 * x))\n x = +0.0001\nfor k in range(100, 10100, 100):\n exec(f'S{k} =pop[1:k]')\n<mask token>\nfor size in np.arange(100, 10100, 100):\n exec(f'S{size} = np.random.exponential(scale=1.2,size=size)')\nlen(S10000)\n<mask token>\nfor size in np.arange(100, 10100, 100):\n dict_samples[size] = np.random.exponential(scale=10 / 12, size=size)\ndict_samples[100]\nlen(dict_samples[200])\n1 / 1.2\n<mask token>\nfor pos in np.arange(100, 10100, 100):\n sample = dict_samples[pos]\n sample_mean = sample.mean()\n print('The mean for sample {} is {}'.format(pos, sample_mean))\n", "step-3": "<mask token>\npop = []\nx = 0\nfor a in range(1, 10001):\n pop.append(1.2 * e ** (-1.2 * x))\n x = +0.0001\nfor k in range(100, 10100, 100):\n exec(f'S{k} =pop[1:k]')\n<mask token>\nfor size in np.arange(100, 10100, 100):\n exec(f'S{size} = np.random.exponential(scale=1.2,size=size)')\nlen(S10000)\n<mask token>\ndict_samples = {}\nfor size in np.arange(100, 10100, 100):\n dict_samples[size] = np.random.exponential(scale=10 / 12, size=size)\ndict_samples[100]\nlen(dict_samples[200])\n1 / 1.2\npos = 100\nfor pos in np.arange(100, 10100, 100):\n sample = dict_samples[pos]\n sample_mean = sample.mean()\n print('The mean for sample {} is {}'.format(pos, sample_mean))\n", "step-4": "from math import exp\nfrom math import e\nimport numpy as np\nimport decimal\nimport pandas as pd\npop = []\nx = 0\nfor a in range(1, 10001):\n pop.append(1.2 * e ** (-1.2 * x))\n x = +0.0001\nfor k in range(100, 10100, 100):\n exec(f'S{k} =pop[1:k]')\nimport numpy as np\nfor size in np.arange(100, 10100, 100):\n exec(f'S{size} = np.random.exponential(scale=1.2,size=size)')\nlen(S10000)\nimport numpy as np\ndict_samples = {}\nfor size in np.arange(100, 10100, 100):\n dict_samples[size] = np.random.exponential(scale=10 / 12, size=size)\ndict_samples[100]\nlen(dict_samples[200])\n1 / 1.2\npos = 100\nfor pos in np.arange(100, 10100, 100):\n sample = dict_samples[pos]\n sample_mean = sample.mean()\n print('The mean for sample {} is {}'.format(pos, sample_mean))\n", "step-5": "\r\nfrom math import exp\r\nfrom math import e\r\nimport numpy as np\r\nimport decimal\r\nimport pandas as pd\r\n\r\n\r\n\r\n\r\npop = []\r\nx = 0\r\nfor a in range(1,10001):\r\n pop.append((1.2)*e**(-1.2*x))\r\n x =+0.0001\r\n\r\n\r\nfor k in range(100,10100,100):\r\n exec(f'S{k} =pop[1:k]')\r\n\r\n\r\n####################################################################################\r\n\r\nimport numpy as np\r\n\r\nfor size in np.arange(100,10100,100):\t\r\n exec(f'S{size} = np.random.exponential(scale=1.2,size=size)')\r\n\r\nlen(S10000)\r\n\r\n####################################################################################\r\nimport numpy as np\r\n#another way to do it\r\n#create a dictionary of samples\r\ndict_samples = {} \r\nfor size in np.arange(100,10100,100):\t\r\n dict_samples[size]=np.random.exponential(scale=10/12,size=size)\r\n\r\n\r\ndict_samples[100]\r\n \r\nlen(dict_samples[200])\r\n\r\n1/1.2\r\n\r\npos = 100\r\nfor pos in np.arange(100,10100,100):\r\n sample = dict_samples[pos]\r\n sample_mean = sample.mean()\r\n print(\"The mean for sample {} is {}\".format(pos,sample_mean))\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Kipland Melton import psutil import math def convert_size(size_bytes): if size_bytes == 0: return "0B" size_name = ("%", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size_bytes, 1024))) p = math.pow(1024, i) s = round(size_bytes / p, 2) return "%s %s" % (s, size_name[i]) def RetrieveMemory(): # Holds returned information from Psutil library involving memory ram_info = psutil.virtual_memory() typePresented = ("Total : ","Used : ","Free : ", "Usage : ") # Main formatting data presentation loop counter = 0 print() for info in ram_info: #print("iteration:",counter) try: if info > 100: print(typePresented[counter],convert_size(info)) counter += 1 else: print(typePresented[3],convert_size(info)) except IndexError: continue if __name__ == "__main__": RetrieveMemory()
normal
{ "blob_id": "d960d3d1680f825f0f68fc6d66f491bbbba805ce", "index": 5004, "step-1": "<mask token>\n\n\ndef RetrieveMemory():\n ram_info = psutil.virtual_memory()\n typePresented = 'Total : ', 'Used : ', 'Free : ', 'Usage : '\n counter = 0\n print()\n for info in ram_info:\n try:\n if info > 100:\n print(typePresented[counter], convert_size(info))\n counter += 1\n else:\n print(typePresented[3], convert_size(info))\n except IndexError:\n continue\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return '0B'\n size_name = '%', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return '%s %s' % (s, size_name[i])\n\n\ndef RetrieveMemory():\n ram_info = psutil.virtual_memory()\n typePresented = 'Total : ', 'Used : ', 'Free : ', 'Usage : '\n counter = 0\n print()\n for info in ram_info:\n try:\n if info > 100:\n print(typePresented[counter], convert_size(info))\n counter += 1\n else:\n print(typePresented[3], convert_size(info))\n except IndexError:\n continue\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return '0B'\n size_name = '%', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return '%s %s' % (s, size_name[i])\n\n\ndef RetrieveMemory():\n ram_info = psutil.virtual_memory()\n typePresented = 'Total : ', 'Used : ', 'Free : ', 'Usage : '\n counter = 0\n print()\n for info in ram_info:\n try:\n if info > 100:\n print(typePresented[counter], convert_size(info))\n counter += 1\n else:\n print(typePresented[3], convert_size(info))\n except IndexError:\n continue\n\n\nif __name__ == '__main__':\n RetrieveMemory()\n", "step-4": "import psutil\nimport math\n\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return '0B'\n size_name = '%', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return '%s %s' % (s, size_name[i])\n\n\ndef RetrieveMemory():\n ram_info = psutil.virtual_memory()\n typePresented = 'Total : ', 'Used : ', 'Free : ', 'Usage : '\n counter = 0\n print()\n for info in ram_info:\n try:\n if info > 100:\n print(typePresented[counter], convert_size(info))\n counter += 1\n else:\n print(typePresented[3], convert_size(info))\n except IndexError:\n continue\n\n\nif __name__ == '__main__':\n RetrieveMemory()\n", "step-5": "# Kipland Melton\nimport psutil\nimport math\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return \"0B\"\n size_name = (\"%\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return \"%s %s\" % (s, size_name[i])\n\ndef RetrieveMemory():\n\n # Holds returned information from Psutil library involving memory\n ram_info = psutil.virtual_memory()\n \n typePresented = (\"Total : \",\"Used : \",\"Free : \", \"Usage : \")\n\n # Main formatting data presentation loop\n\n counter = 0\n\n print()\n\n for info in ram_info:\n #print(\"iteration:\",counter)\n try:\n if info > 100:\n print(typePresented[counter],convert_size(info))\n counter += 1\n \n else:\n print(typePresented[3],convert_size(info))\n except IndexError:\n continue\n\n\n\nif __name__ == \"__main__\":\n RetrieveMemory()", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import re import os import base64 os.popen("tshark -r log.pcap -d 'tcp.port==57000,http' -d 'tcp.port==44322,http' -d 'tcp.port==44818,http' -Y 'data-text-lines' -Tfields -e http.file_data > request") def evals(text): template = "{}\['__doc__'\]\[\d+\]" keys = map(str, range(10)) keys += ['\[\]','\(\)',"''"] rule = '|'.join(template.format(_) for _ in keys) regex = re.compile(rule + "|'[\w|\d]'") for i in regex.findall(text): r = i.replace("['__doc__']", ".__doc__") r = re.sub('^\d', 'int', r) r = re.sub('^\(\)', 'tuple', r) text = text.replace(i, eval(r)) text = text.replace('\n', '\\n') return text.replace('~','') def extract(text): regex = re.compile(r'-s (\d+) -l \d+ ([\w\.]+)\).*\[(\d+)\].*\((\w|\d|\\n)\)') return regex.findall(text)[0] requ = open('request').readlines()[:] result = dict() for x in requ: clean = x.strip('\n') clean = re.sub(r'\\n', '', clean) clean = base64.b64decode(clean) clean = evals(clean.split('=')[1]) if 'index' in clean: index, name, pos, char = extract(clean) key = result.get(name, dict()) index = int(index) pos = int(pos) if not key: result[name] = key lastIndexed = result[name].get(index, dict()) if not lastIndexed: result[name][index] = lastIndexed lastOccurence = result[name][index].get(pos, ['']) if not lastOccurence[0]: result[name][index][pos] = lastOccurence lastOccurence[0] = (index, pos, char) for k,v in result.iteritems(): print '[+] Saving', k temp = '' for kk in sorted(v): vv = result[k][kk] for kkk in sorted(vv): vvv = result[k][kk][kkk] char = vvv[0][-1] if char != '\\n': temp += vvv[0][-1] with open(k, 'wb') as f: content = temp.decode('hex') f.write(content)
normal
{ "blob_id": "c26bdc3f47aa9ac0cda0334e97bdaf3f9d56eb6c", "index": 437, "step-1": "import re\nimport os\nimport base64\n\nos.popen(\"tshark -r log.pcap -d 'tcp.port==57000,http' -d 'tcp.port==44322,http' -d 'tcp.port==44818,http' -Y 'data-text-lines' -Tfields -e http.file_data > request\")\n\ndef evals(text):\n template = \"{}\\['__doc__'\\]\\[\\d+\\]\"\n keys = map(str, range(10))\n keys += ['\\[\\]','\\(\\)',\"''\"]\n \n rule = '|'.join(template.format(_) for _ in keys)\n regex = re.compile(rule + \"|'[\\w|\\d]'\")\n\n for i in regex.findall(text):\n r = i.replace(\"['__doc__']\", \".__doc__\")\n r = re.sub('^\\d', 'int', r)\n r = re.sub('^\\(\\)', 'tuple', r)\n text = text.replace(i, eval(r))\n \n text = text.replace('\\n', '\\\\n')\n return text.replace('~','')\n\ndef extract(text):\n regex = re.compile(r'-s (\\d+) -l \\d+ ([\\w\\.]+)\\).*\\[(\\d+)\\].*\\((\\w|\\d|\\\\n)\\)')\n return regex.findall(text)[0]\n\nrequ = open('request').readlines()[:]\nresult = dict()\n\nfor x in requ:\n clean = x.strip('\\n')\n clean = re.sub(r'\\\\n', '', clean)\n clean = base64.b64decode(clean)\n clean = evals(clean.split('=')[1])\n\n if 'index' in clean:\n index, name, pos, char = extract(clean)\n key = result.get(name, dict())\n index = int(index)\n pos = int(pos)\n\n if not key:\n result[name] = key\n\n lastIndexed = result[name].get(index, dict())\n if not lastIndexed:\n result[name][index] = lastIndexed\n\n lastOccurence = result[name][index].get(pos, [''])\n if not lastOccurence[0]:\n result[name][index][pos] = lastOccurence\n \n lastOccurence[0] = (index, pos, char)\n\nfor k,v in result.iteritems():\n print '[+] Saving', k\n\n temp = ''\n for kk in sorted(v):\n vv = result[k][kk]\n\n for kkk in sorted(vv):\n vvv = result[k][kk][kkk]\n\n char = vvv[0][-1]\n if char != '\\\\n':\n temp += vvv[0][-1]\n\n with open(k, 'wb') as f:\n content = temp.decode('hex')\n f.write(content)", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from django.contrib import admin from main_app.models import sites, statuses, redirects # Register your models here. admin.site.register(statuses) admin.site.register(sites) admin.site.register(redirects)
normal
{ "blob_id": "2b8ca0c8c7878536da4f31652976988cdba62d89", "index": 491, "step-1": "<mask token>\n", "step-2": "<mask token>\nadmin.site.register(statuses)\nadmin.site.register(sites)\nadmin.site.register(redirects)\n", "step-3": "from django.contrib import admin\nfrom main_app.models import sites, statuses, redirects\nadmin.site.register(statuses)\nadmin.site.register(sites)\nadmin.site.register(redirects)\n", "step-4": "from django.contrib import admin\nfrom main_app.models import sites, statuses, redirects\n# Register your models here.\nadmin.site.register(statuses)\nadmin.site.register(sites)\nadmin.site.register(redirects)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from glob import glob from PIL import Image import numpy as np from tqdm import tqdm import cv2 import os import matplotlib.pyplot as plt np.set_printoptions(precision=3, suppress=True) def get_index(path): """ get the length of index for voc2012 dataset. path: the index of train,val or test path """ with open(path,'r') as f: zz = f.readlines() return [index.split("\n")[0] for index in zz] def show_examples(images_base, labels_base, index_list, output_path): results= [] for index in tqdm(index_list): img = cv2.imread(os.path.join(images_base, index+".jpg")) # lab = cv2.imread(os.path.join(labels_base, index+".png"), 0) lab = np.array(Image.open(os.path.join(labels_base, index+".png")).convert('P')) results+= np.unique(lab).tolist() # # plt.figure(figsize=(4,2)) # plt.subplot(121) # plt.imshow(img) # plt.title("images") # plt.subplot(122) # plt.imshow(lab) # plt.title('label') # plt.tight_layout() # plt.savefig("%s/visual_%s.png"%(output_path, index), dpi=300) # plt.show() return list(set(results)) def get_info(label_dir): label_path = glob("%s/*" % label_dir) total_area = [] total_number = [] for label_name in tqdm(label_path): lab = np.array(Image.open(label_name).convert('P')) # print(lab.shape) masks = [(lab == v) for v in range(21)] # get each class area of images zz = np.mean(masks, axis =(1, 2)) total_area.append(zz.copy()) # get exist class of images zz[zz > 0] = 1 total_number.append(zz) print(np.sum(total_number, axis=0)) print(np.sum(total_area, axis=0)) if __name__=="__main__": import shutil output_dir = "visual_results" if os.path.exists(output_dir): shutil.rmtree(output_dir) os.makedirs(output_dir) index_dir = '/data/VOCdevkit/VOC2012/ImageSets/Segmentation' imge_dir = "/data/VOCdevkit/VOC2012/JPEGImages" label_dir = "/data/VOCdevkit/VOC2012/SegmentationClass" print("train_index:", len(get_index( os.path.join(index_dir, "train.txt") ) ) ) # 1464 print("val_index:", len( get_index( os.path.join(index_dir, "val.txt") ) ) ) # 1449 print("test_index:", len( get_index( os.path.join(index_dir, "test.txt") ) ) ) #1456 train_results= show_examples(imge_dir, label_dir, get_index(os.path.join(index_dir, "train.txt")), output_dir) train_results.sort() print("train label:", len(train_results), train_results) get_info(label_dir) """ train label: 20 [0, 14, 19, 33, 37, 38, 52, 57, 72, 75, 89, 94, 108, 112, 113, 128, 132, 147, 150, 220] number of each class: [2903. 178. 144. 208. 150. 183. 152. 255. 250. 271. 135. 157. 249. 147. 157. 888. 167. 120. 183. 167. 157.] are of each class: [2019.413 21.703 8.608 23.93 16.14 19.298 49.044 40.491 68.606 27.83 28.275 33.941 51.712 27.909 30.196 139.84 16.282 22.923 39.572 44.975 22.053] """
normal
{ "blob_id": "b1b478965ad939a98478b19b4a94f3250167e25a", "index": 2189, "step-1": "<mask token>\n\n\ndef show_examples(images_base, labels_base, index_list, output_path):\n results = []\n for index in tqdm(index_list):\n img = cv2.imread(os.path.join(images_base, index + '.jpg'))\n lab = np.array(Image.open(os.path.join(labels_base, index + '.png')\n ).convert('P'))\n results += np.unique(lab).tolist()\n return list(set(results))\n\n\ndef get_info(label_dir):\n label_path = glob('%s/*' % label_dir)\n total_area = []\n total_number = []\n for label_name in tqdm(label_path):\n lab = np.array(Image.open(label_name).convert('P'))\n masks = [(lab == v) for v in range(21)]\n zz = np.mean(masks, axis=(1, 2))\n total_area.append(zz.copy())\n zz[zz > 0] = 1\n total_number.append(zz)\n print(np.sum(total_number, axis=0))\n print(np.sum(total_area, axis=0))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_index(path):\n \"\"\"\n get the length of index for voc2012 dataset.\n path: the index of train,val or test path\n \"\"\"\n with open(path, 'r') as f:\n zz = f.readlines()\n return [index.split('\\n')[0] for index in zz]\n\n\ndef show_examples(images_base, labels_base, index_list, output_path):\n results = []\n for index in tqdm(index_list):\n img = cv2.imread(os.path.join(images_base, index + '.jpg'))\n lab = np.array(Image.open(os.path.join(labels_base, index + '.png')\n ).convert('P'))\n results += np.unique(lab).tolist()\n return list(set(results))\n\n\ndef get_info(label_dir):\n label_path = glob('%s/*' % label_dir)\n total_area = []\n total_number = []\n for label_name in tqdm(label_path):\n lab = np.array(Image.open(label_name).convert('P'))\n masks = [(lab == v) for v in range(21)]\n zz = np.mean(masks, axis=(1, 2))\n total_area.append(zz.copy())\n zz[zz > 0] = 1\n total_number.append(zz)\n print(np.sum(total_number, axis=0))\n print(np.sum(total_area, axis=0))\n\n\n<mask token>\n", "step-3": "<mask token>\nnp.set_printoptions(precision=3, suppress=True)\n\n\ndef get_index(path):\n \"\"\"\n get the length of index for voc2012 dataset.\n path: the index of train,val or test path\n \"\"\"\n with open(path, 'r') as f:\n zz = f.readlines()\n return [index.split('\\n')[0] for index in zz]\n\n\ndef show_examples(images_base, labels_base, index_list, output_path):\n results = []\n for index in tqdm(index_list):\n img = cv2.imread(os.path.join(images_base, index + '.jpg'))\n lab = np.array(Image.open(os.path.join(labels_base, index + '.png')\n ).convert('P'))\n results += np.unique(lab).tolist()\n return list(set(results))\n\n\ndef get_info(label_dir):\n label_path = glob('%s/*' % label_dir)\n total_area = []\n total_number = []\n for label_name in tqdm(label_path):\n lab = np.array(Image.open(label_name).convert('P'))\n masks = [(lab == v) for v in range(21)]\n zz = np.mean(masks, axis=(1, 2))\n total_area.append(zz.copy())\n zz[zz > 0] = 1\n total_number.append(zz)\n print(np.sum(total_number, axis=0))\n print(np.sum(total_area, axis=0))\n\n\nif __name__ == '__main__':\n import shutil\n output_dir = 'visual_results'\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n index_dir = '/data/VOCdevkit/VOC2012/ImageSets/Segmentation'\n imge_dir = '/data/VOCdevkit/VOC2012/JPEGImages'\n label_dir = '/data/VOCdevkit/VOC2012/SegmentationClass'\n print('train_index:', len(get_index(os.path.join(index_dir, 'train.txt'))))\n print('val_index:', len(get_index(os.path.join(index_dir, 'val.txt'))))\n print('test_index:', len(get_index(os.path.join(index_dir, 
'test.txt'))))\n train_results = show_examples(imge_dir, label_dir, get_index(os.path.\n join(index_dir, 'train.txt')), output_dir)\n train_results.sort()\n print('train label:', len(train_results), train_results)\n get_info(label_dir)\n<mask token>\n", "step-4": "from glob import glob\nfrom PIL import Image\nimport numpy as np\nfrom tqdm import tqdm\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\nnp.set_printoptions(precision=3, suppress=True)\n\n\ndef get_index(path):\n \"\"\"\n get the length of index for voc2012 dataset.\n path: the index of train,val or test path\n \"\"\"\n with open(path, 'r') as f:\n zz = f.readlines()\n return [index.split('\\n')[0] for index in zz]\n\n\ndef show_examples(images_base, labels_base, index_list, output_path):\n results = []\n for index in tqdm(index_list):\n img = cv2.imread(os.path.join(images_base, index + '.jpg'))\n lab = np.array(Image.open(os.path.join(labels_base, index + '.png')\n ).convert('P'))\n results += np.unique(lab).tolist()\n return list(set(results))\n\n\ndef get_info(label_dir):\n label_path = glob('%s/*' % label_dir)\n total_area = []\n total_number = []\n for label_name in tqdm(label_path):\n lab = np.array(Image.open(label_name).convert('P'))\n masks = [(lab == v) for v in range(21)]\n zz = np.mean(masks, axis=(1, 2))\n total_area.append(zz.copy())\n zz[zz > 0] = 1\n total_number.append(zz)\n print(np.sum(total_number, axis=0))\n print(np.sum(total_area, axis=0))\n\n\nif __name__ == '__main__':\n import shutil\n output_dir = 'visual_results'\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n index_dir = '/data/VOCdevkit/VOC2012/ImageSets/Segmentation'\n imge_dir = '/data/VOCdevkit/VOC2012/JPEGImages'\n label_dir = '/data/VOCdevkit/VOC2012/SegmentationClass'\n print('train_index:', len(get_index(os.path.join(index_dir, 'train.txt'))))\n print('val_index:', len(get_index(os.path.join(index_dir, 'val.txt'))))\n print('test_index:', len(get_index(os.path.join(index_dir, 'test.txt'))))\n train_results = show_examples(imge_dir, label_dir, get_index(os.path.\n join(index_dir, 'train.txt')), output_dir)\n train_results.sort()\n print('train label:', len(train_results), train_results)\n get_info(label_dir)\n<mask token>\n", "step-5": "from glob import glob\nfrom PIL import Image\nimport numpy as np\nfrom tqdm import tqdm\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\n\nnp.set_printoptions(precision=3, suppress=True)\n\n\ndef get_index(path):\n \"\"\"\n get the length of index for voc2012 dataset.\n path: the index of train,val or test path\n \"\"\"\n with open(path,'r') as f:\n zz = f.readlines()\n return [index.split(\"\\n\")[0] for index in zz]\n\n\ndef show_examples(images_base, labels_base, index_list, output_path):\n results= []\n for index in tqdm(index_list):\n img = cv2.imread(os.path.join(images_base, index+\".jpg\"))\n # lab = cv2.imread(os.path.join(labels_base, index+\".png\"), 0)\n lab = np.array(Image.open(os.path.join(labels_base, index+\".png\")).convert('P'))\n results+= np.unique(lab).tolist()\n #\n # plt.figure(figsize=(4,2))\n # plt.subplot(121)\n # plt.imshow(img)\n # plt.title(\"images\")\n # plt.subplot(122)\n # plt.imshow(lab)\n # plt.title('label')\n # plt.tight_layout()\n # plt.savefig(\"%s/visual_%s.png\"%(output_path, index), dpi=300)\n # plt.show()\n\n return list(set(results))\n\n\ndef get_info(label_dir):\n label_path = glob(\"%s/*\" % label_dir)\n total_area = []\n total_number = []\n\n for label_name in tqdm(label_path):\n lab = 
np.array(Image.open(label_name).convert('P'))\n # print(lab.shape)\n masks = [(lab == v) for v in range(21)]\n # get each class area of images\n zz = np.mean(masks, axis =(1, 2))\n total_area.append(zz.copy())\n # get exist class of images\n zz[zz > 0] = 1\n total_number.append(zz)\n\n print(np.sum(total_number, axis=0))\n print(np.sum(total_area, axis=0))\n\n\nif __name__==\"__main__\":\n\n import shutil\n output_dir = \"visual_results\"\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n\n index_dir = '/data/VOCdevkit/VOC2012/ImageSets/Segmentation'\n imge_dir = \"/data/VOCdevkit/VOC2012/JPEGImages\"\n label_dir = \"/data/VOCdevkit/VOC2012/SegmentationClass\"\n print(\"train_index:\", len(get_index( os.path.join(index_dir, \"train.txt\") ) ) ) # 1464\n print(\"val_index:\", len( get_index( os.path.join(index_dir, \"val.txt\") ) ) ) # 1449\n print(\"test_index:\", len( get_index( os.path.join(index_dir, \"test.txt\") ) ) ) #1456\n\n train_results= show_examples(imge_dir, label_dir, get_index(os.path.join(index_dir, \"train.txt\")), output_dir)\n train_results.sort()\n print(\"train label:\", len(train_results), train_results)\n get_info(label_dir)\n\n\n\"\"\"\ntrain label: 20 [0, 14, 19, 33, 37, 38, 52, 57, 72, 75, 89, 94, 108, 112, 113, 128, 132, 147, 150, 220]\n\nnumber of each class:\n[2903. 178. 144. 208. 150. 183. 152. 255. 250. 271. 135. 157. 249. 147. 157. 888. 167. 120. 183. 167. 157.]\n\nare of each class:\n[2019.413 21.703 8.608 23.93 16.14 19.298 49.044 40.491\n 68.606 27.83 28.275 33.941 51.712 27.909 30.196 139.84\n 16.282 22.923 39.572 44.975 22.053]\n\"\"\"", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import daemon import time import sys #out = open("~/tmp/stdout", "a+") #err = open("~/tmp/stderr", "a+") # 如果设定为标准输出,那么关闭终端窗口,退出守护进程。 # Ctrl+c 不会退出进程 # 关闭终端窗口,退出守护进程 def do_main_program(): print("start the main program...") while True: time.sleep(1) print('another second passed') context = daemon.DaemonContext() context.stdout = sys.stdout context.stderr = sys.stderr with context: print("start the main program") do_main_program() print("end ")
normal
{ "blob_id": "3cb96607aaf58a7de3fa0a9cd61b7f4e3c6b061a", "index": 4802, "step-1": "<mask token>\n\n\ndef do_main_program():\n print('start the main program...')\n while True:\n time.sleep(1)\n print('another second passed')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef do_main_program():\n print('start the main program...')\n while True:\n time.sleep(1)\n print('another second passed')\n\n\n<mask token>\nwith context:\n print('start the main program')\n do_main_program()\nprint('end ')\n", "step-3": "<mask token>\n\n\ndef do_main_program():\n print('start the main program...')\n while True:\n time.sleep(1)\n print('another second passed')\n\n\ncontext = daemon.DaemonContext()\ncontext.stdout = sys.stdout\ncontext.stderr = sys.stderr\nwith context:\n print('start the main program')\n do_main_program()\nprint('end ')\n", "step-4": "import daemon\nimport time\nimport sys\n\n\ndef do_main_program():\n print('start the main program...')\n while True:\n time.sleep(1)\n print('another second passed')\n\n\ncontext = daemon.DaemonContext()\ncontext.stdout = sys.stdout\ncontext.stderr = sys.stderr\nwith context:\n print('start the main program')\n do_main_program()\nprint('end ')\n", "step-5": "import daemon\nimport time\nimport sys\n\n#out = open(\"~/tmp/stdout\", \"a+\")\n#err = open(\"~/tmp/stderr\", \"a+\")\n# 如果设定为标准输出,那么关闭终端窗口,退出守护进程。\n# Ctrl+c 不会退出进程\n# 关闭终端窗口,退出守护进程\n\ndef do_main_program():\n print(\"start the main program...\")\n while True:\n time.sleep(1)\n print('another second passed')\n\n\ncontext = daemon.DaemonContext()\n\ncontext.stdout = sys.stdout\ncontext.stderr = sys.stderr\n\n\n\nwith context:\n print(\"start the main program\")\n do_main_program()\n\nprint(\"end \")", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import cpt_tools from gui_helpers.gui_config import * chisqr_str = '\u03c72' mu_str = '\u03bc' sigma_str = '\u03c3' class FitWidget( object ) : def __init__( self, plotter_widget, analyzer = None ) : self.plotter_widget = plotter_widget self.plotter = plotter_widget.plotter self.hists = self.plotter.all_hists self.layout = QVBoxLayout() params_labels = [ 'A', mu_str, sigma_str, chisqr_str ] self.num_params = len( params_labels ) h_labels = [ '', '', 'Left', 'Right' ] h_labels.extend( params_labels ) v_labels = [ x.title for x in self.hists ] nrows = len( v_labels ) ncols = len( h_labels ) self.table = QTableWidget( nrows, ncols ) self.table.setMinimumWidth( 400 ) self.table.setMinimumHeight(100) # self.table.setMaximumHeight(200) # size_policy = QSizePolicy( QSizePolicy.Maximum, # QSizePolicy.Maximum ) # self.table.setSizePolicy( size_policy ) self.table.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch ) self.table.verticalHeader().setSectionResizeMode( QHeaderView.Stretch ) # header = self.table.horizontalHeader() # header.setSectionResizeMode( 0, QHeaderView.Stretch ) # for j in range( 1, len( h_labels ) ) : # header.setSectionResizeMode( j, QHeaderView.ResizeToContents ) self.table.setHorizontalHeaderLabels( h_labels ) self.table.setVerticalHeaderLabels( v_labels ) self.bounds_entries = [] self.params_labels = [] self.fit_buttons = [] self.delete_buttons = [] for i in range( len( self.hists ) ) : self.bounds_entries.append( [ QLineEdit(), QLineEdit() ] ) self.params_labels.append( [ QLabel() for i in range( self.num_params ) ] ) self.fit_buttons.append( QPushButton( 'Fit' ) ) self.delete_buttons.append( QPushButton( 'Delete' ) ) self.fit_buttons[i].clicked.connect( lambda state, a=i : self.fit_button_clicked( a ) ) self.delete_buttons[i].clicked.connect( lambda state, a=i : self.delete_button_clicked( a ) ) # self.fit_buttons[i].clicked.emit() self.table.setCellWidget( i, 0, self.fit_buttons[i] ) self.table.setCellWidget( i, 1, self.delete_buttons[i] ) self.table.setCellWidget( i, 2, self.bounds_entries[i][0] ) self.table.setCellWidget( i, 3, self.bounds_entries[i][1] ) for j in range( self.num_params ) : self.table.setCellWidget( i, 4 + j, self.params_labels[i][j] ) # self.left_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) # self.right_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) # self.layout.setSpacing(0) # self.layout.addLayout( label_layout ) self.layout.addWidget( self.table ) def fit_button_clicked( self, i ) : print( self.bounds_entries[i][0].text() ) try : left_x_bound = float( self.bounds_entries[i][0].text() ) right_x_bound = float( self.bounds_entries[i][1].text() ) except : print( 'WARNING: please specify bounds for fit' ) return bounds = [ left_x_bound, right_x_bound ] fit = self.hists[i].apply_fit( bounds ) if fit is None : print( 'ERROR: fit failed' ) return self.set_fit_params( fit, i ) self.plotter.update_all() self.plotter_widget.reload_visualization_params() return fit def set_fit_params( self, fit, i ) : if fit is None : for j in range( self.num_params ) : self.params_labels[i][j].setText( '' ) return params = fit.params params_errors = fit.params_errors redchisqr = fit.redchisqr # params, params_errors, redchisqr = fit if params_errors is not None : labels = [ '%.1f\u00b1%.1f' % ( params[j], params_errors[j] ) for j in range( len(params) ) ] else : labels = [ '%.1f' % params[j] for j in range( len(params) ) ] labels.append( '%.1f' % redchisqr ) for j in range( len(params) + 1 ) : self.params_labels[i][j].setText( 
labels[j] ) def delete_button_clicked( self, i ) : self.hists[i].remove_fit()
normal
{ "blob_id": "aa51b2d4bfe4051f3302d14cf2123a3881a8a2e3", "index": 5668, "step-1": "<mask token>\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n 
for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n\n def fit_button_clicked(self, i):\n print(self.bounds_entries[i][0].text())\n try:\n left_x_bound = float(self.bounds_entries[i][0].text())\n right_x_bound = float(self.bounds_entries[i][1].text())\n except:\n print('WARNING: please specify bounds for fit')\n return\n bounds = [left_x_bound, right_x_bound]\n fit = self.hists[i].apply_fit(bounds)\n if fit is None:\n print('ERROR: fit failed')\n return\n self.set_fit_params(fit, i)\n self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit\n\n def set_fit_params(self, fit, i):\n if fit is None:\n for j in range(self.num_params):\n self.params_labels[i][j].setText('')\n return\n params = fit.params\n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n if params_errors is not None:\n labels = [('%.1f±%.1f' % (params[j], params_errors[j])) for j in\n range(len(params))]\n else:\n labels = [('%.1f' % params[j]) for j in range(len(params))]\n labels.append('%.1f' % redchisqr)\n for j in range(len(params) + 1):\n self.params_labels[i][j].setText(labels[j])\n\n def delete_button_clicked(self, i):\n self.hists[i].remove_fit()\n", "step-3": "<mask token>\nchisqr_str = 'χ2'\nmu_str = 'μ'\nsigma_str = 'σ'\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n\n def fit_button_clicked(self, i):\n print(self.bounds_entries[i][0].text())\n try:\n left_x_bound = float(self.bounds_entries[i][0].text())\n right_x_bound = float(self.bounds_entries[i][1].text())\n except:\n print('WARNING: please specify bounds for fit')\n return\n bounds = [left_x_bound, right_x_bound]\n fit = self.hists[i].apply_fit(bounds)\n if fit is None:\n print('ERROR: fit failed')\n return\n self.set_fit_params(fit, i)\n 
self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit\n\n def set_fit_params(self, fit, i):\n if fit is None:\n for j in range(self.num_params):\n self.params_labels[i][j].setText('')\n return\n params = fit.params\n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n if params_errors is not None:\n labels = [('%.1f±%.1f' % (params[j], params_errors[j])) for j in\n range(len(params))]\n else:\n labels = [('%.1f' % params[j]) for j in range(len(params))]\n labels.append('%.1f' % redchisqr)\n for j in range(len(params) + 1):\n self.params_labels[i][j].setText(labels[j])\n\n def delete_button_clicked(self, i):\n self.hists[i].remove_fit()\n", "step-4": "import cpt_tools\nfrom gui_helpers.gui_config import *\nchisqr_str = 'χ2'\nmu_str = 'μ'\nsigma_str = 'σ'\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n\n def fit_button_clicked(self, i):\n print(self.bounds_entries[i][0].text())\n try:\n left_x_bound = float(self.bounds_entries[i][0].text())\n right_x_bound = float(self.bounds_entries[i][1].text())\n except:\n print('WARNING: please specify bounds for fit')\n return\n bounds = [left_x_bound, right_x_bound]\n fit = self.hists[i].apply_fit(bounds)\n if fit is None:\n print('ERROR: fit failed')\n return\n self.set_fit_params(fit, i)\n self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit\n\n def set_fit_params(self, fit, i):\n if fit is None:\n for j in range(self.num_params):\n self.params_labels[i][j].setText('')\n return\n params = fit.params\n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n if params_errors is not None:\n labels = [('%.1f±%.1f' % (params[j], params_errors[j])) for j in\n range(len(params))]\n else:\n labels = [('%.1f' % params[j]) for j in range(len(params))]\n labels.append('%.1f' % redchisqr)\n 
for j in range(len(params) + 1):\n self.params_labels[i][j].setText(labels[j])\n\n def delete_button_clicked(self, i):\n self.hists[i].remove_fit()\n", "step-5": "import cpt_tools\nfrom gui_helpers.gui_config import * \n\n\nchisqr_str = '\\u03c72'\nmu_str = '\\u03bc'\nsigma_str = '\\u03c3'\n\n\n\nclass FitWidget( object ) :\n\n def __init__( self, plotter_widget, analyzer = None ) :\n\n self.plotter_widget = plotter_widget \n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n \n self.layout = QVBoxLayout()\n\n params_labels = [ 'A', mu_str, sigma_str, chisqr_str ]\n self.num_params = len( params_labels ) \n\n h_labels = [ '', '', 'Left', 'Right' ]\n h_labels.extend( params_labels ) \n v_labels = [ x.title for x in self.hists ] \n \n nrows = len( v_labels )\n ncols = len( h_labels ) \n\n self.table = QTableWidget( nrows, ncols ) \n self.table.setMinimumWidth( 400 ) \n self.table.setMinimumHeight(100)\n # self.table.setMaximumHeight(200)\n # size_policy = QSizePolicy( QSizePolicy.Maximum,\n # QSizePolicy.Maximum )\n \n # self.table.setSizePolicy( size_policy )\n \n self.table.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch ) \n self.table.verticalHeader().setSectionResizeMode( QHeaderView.Stretch )\n # header = self.table.horizontalHeader() \n # header.setSectionResizeMode( 0, QHeaderView.Stretch )\n # for j in range( 1, len( h_labels ) ) : \n # header.setSectionResizeMode( j, QHeaderView.ResizeToContents )\n \n self.table.setHorizontalHeaderLabels( h_labels )\n self.table.setVerticalHeaderLabels( v_labels )\n\n self.bounds_entries = [] \n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = [] \n \n for i in range( len( self.hists ) ) :\n \n self.bounds_entries.append( [ QLineEdit(), QLineEdit() ] )\n self.params_labels.append( [ QLabel() for i in range( self.num_params ) ] )\n\n self.fit_buttons.append( QPushButton( 'Fit' ) )\n self.delete_buttons.append( QPushButton( 'Delete' ) )\n \n self.fit_buttons[i].clicked.connect( lambda state, a=i : self.fit_button_clicked( a ) )\n self.delete_buttons[i].clicked.connect( lambda state, a=i : self.delete_button_clicked( a ) )\n # self.fit_buttons[i].clicked.emit() \n\n self.table.setCellWidget( i, 0, self.fit_buttons[i] )\n self.table.setCellWidget( i, 1, self.delete_buttons[i] ) \n\n self.table.setCellWidget( i, 2, self.bounds_entries[i][0] )\n self.table.setCellWidget( i, 3, self.bounds_entries[i][1] )\n\n for j in range( self.num_params ) :\n self.table.setCellWidget( i, 4 + j, self.params_labels[i][j] )\n\n # self.left_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) \n # self.right_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) \n \n # self.layout.setSpacing(0)\n # self.layout.addLayout( label_layout ) \n\n self.layout.addWidget( self.table )\n \n\n \n def fit_button_clicked( self, i ) :\n\n print( self.bounds_entries[i][0].text() )\n \n try : \n left_x_bound = float( self.bounds_entries[i][0].text() )\n right_x_bound = float( self.bounds_entries[i][1].text() )\n except :\n print( 'WARNING: please specify bounds for fit' )\n return\n\n bounds = [ left_x_bound, right_x_bound ] \n fit = self.hists[i].apply_fit( bounds ) \n if fit is None :\n print( 'ERROR: fit failed' ) \n return\n\n self.set_fit_params( fit, i ) \n self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit \n\n\n def set_fit_params( self, fit, i ) :\n\n if fit is None :\n for j in range( self.num_params ) :\n self.params_labels[i][j].setText( '' )\n return 
\n \n params = fit.params \n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n \n # params, params_errors, redchisqr = fit\n \n if params_errors is not None : \n labels = [ '%.1f\\u00b1%.1f' % ( params[j], params_errors[j] ) for j in range( len(params) ) ]\n else :\n labels = [ '%.1f' % params[j] for j in range( len(params) ) ]\n \n labels.append( '%.1f' % redchisqr )\n for j in range( len(params) + 1 ) : \n self.params_labels[i][j].setText( labels[j] )\n\n \n \n\n def delete_button_clicked( self, i ) :\n self.hists[i].remove_fit() \n", "step-ids": [ 2, 5, 6, 7, 8 ] }
[ 2, 5, 6, 7, 8 ]
# coding: utf-8 """ Adobe Experience Manager OSGI config (AEM) API Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501 OpenAPI spec version: 1.0.0-pre.0 Contact: [email protected] Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six class OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'token_expiration': 'ConfigNodePropertyString', 'token_length': 'ConfigNodePropertyString', 'token_refresh': 'ConfigNodePropertyBoolean', 'token_cleanup_threshold': 'ConfigNodePropertyInteger', 'password_hash_algorithm': 'ConfigNodePropertyString', 'password_hash_iterations': 'ConfigNodePropertyInteger', 'password_salt_size': 'ConfigNodePropertyInteger' } attribute_map = { 'token_expiration': 'tokenExpiration', 'token_length': 'tokenLength', 'token_refresh': 'tokenRefresh', 'token_cleanup_threshold': 'tokenCleanupThreshold', 'password_hash_algorithm': 'passwordHashAlgorithm', 'password_hash_iterations': 'passwordHashIterations', 'password_salt_size': 'passwordSaltSize' } def __init__(self, token_expiration=None, token_length=None, token_refresh=None, token_cleanup_threshold=None, password_hash_algorithm=None, password_hash_iterations=None, password_salt_size=None): # noqa: E501 """OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI""" # noqa: E501 self._token_expiration = None self._token_length = None self._token_refresh = None self._token_cleanup_threshold = None self._password_hash_algorithm = None self._password_hash_iterations = None self._password_salt_size = None self.discriminator = None if token_expiration is not None: self.token_expiration = token_expiration if token_length is not None: self.token_length = token_length if token_refresh is not None: self.token_refresh = token_refresh if token_cleanup_threshold is not None: self.token_cleanup_threshold = token_cleanup_threshold if password_hash_algorithm is not None: self.password_hash_algorithm = password_hash_algorithm if password_hash_iterations is not None: self.password_hash_iterations = password_hash_iterations if password_salt_size is not None: self.password_salt_size = password_salt_size @property def token_expiration(self): """Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :rtype: ConfigNodePropertyString """ return self._token_expiration @token_expiration.setter def token_expiration(self, token_expiration): """Sets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. :param token_expiration: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :type: ConfigNodePropertyString """ self._token_expiration = token_expiration @property def token_length(self): """Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501 :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :rtype: ConfigNodePropertyString """ return self._token_length @token_length.setter def token_length(self, token_length): """Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :type: ConfigNodePropertyString """ self._token_length = token_length @property def token_refresh(self): """Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :rtype: ConfigNodePropertyBoolean """ return self._token_refresh @token_refresh.setter def token_refresh(self, token_refresh): """Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :type: ConfigNodePropertyBoolean """ self._token_refresh = token_refresh @property def token_cleanup_threshold(self): """Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :rtype: ConfigNodePropertyInteger """ return self._token_cleanup_threshold @token_cleanup_threshold.setter def token_cleanup_threshold(self, token_cleanup_threshold): """Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :type: ConfigNodePropertyInteger """ self._token_cleanup_threshold = token_cleanup_threshold @property def password_hash_algorithm(self): """Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :rtype: ConfigNodePropertyString """ return self._password_hash_algorithm @password_hash_algorithm.setter def password_hash_algorithm(self, password_hash_algorithm): """Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :type: ConfigNodePropertyString """ self._password_hash_algorithm = password_hash_algorithm @property def password_hash_iterations(self): """Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501 :rtype: ConfigNodePropertyInteger """ return self._password_hash_iterations @password_hash_iterations.setter def password_hash_iterations(self, password_hash_iterations): """Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :type: ConfigNodePropertyInteger """ self._password_hash_iterations = password_hash_iterations @property def password_salt_size(self): """Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :rtype: ConfigNodePropertyInteger """ return self._password_salt_size @password_salt_size.setter def password_salt_size(self, password_salt_size): """Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501 :type: ConfigNodePropertyInteger """ self._password_salt_size = password_salt_size def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
normal
{ "blob_id": "0ddac0aac5bd001504ed37d31b74c6442304e350", "index": 5729, "step-1": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n <mask token>\n <mask token>\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n <mask token>\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n <mask token>\n <mask token>\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <mask token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-2": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, token_expiration=None, token_length=None,\n token_refresh=None, token_cleanup_threshold=None,\n password_hash_algorithm=None, password_hash_iterations=None,\n password_salt_size=None):\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\"\n self._token_expiration = None\n self._token_length = None\n self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n <mask token>\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n <mask token>\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <mask token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-3": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, token_expiration=None, token_length=None,\n token_refresh=None, token_cleanup_threshold=None,\n password_hash_algorithm=None, password_hash_iterations=None,\n password_salt_size=None):\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\"\n self._token_expiration = None\n self._token_length = None\n self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n <mask token>\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n\n @property\n def password_salt_size(self):\n \"\"\"Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_salt_size\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <mask token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-4": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n openapi_types = {'token_expiration': 'ConfigNodePropertyString',\n 'token_length': 'ConfigNodePropertyString', 'token_refresh':\n 'ConfigNodePropertyBoolean', 'token_cleanup_threshold':\n 'ConfigNodePropertyInteger', 'password_hash_algorithm':\n 'ConfigNodePropertyString', 'password_hash_iterations':\n 'ConfigNodePropertyInteger', 'password_salt_size':\n 'ConfigNodePropertyInteger'}\n attribute_map = {'token_expiration': 'tokenExpiration', 'token_length':\n 'tokenLength', 'token_refresh': 'tokenRefresh',\n 'token_cleanup_threshold': 'tokenCleanupThreshold',\n 'password_hash_algorithm': 'passwordHashAlgorithm',\n 'password_hash_iterations': 'passwordHashIterations',\n 'password_salt_size': 'passwordSaltSize'}\n\n def __init__(self, token_expiration=None, token_length=None,\n token_refresh=None, token_cleanup_threshold=None,\n password_hash_algorithm=None, password_hash_iterations=None,\n password_salt_size=None):\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\"\n 
self._token_expiration = None\n self._token_length = None\n self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n\n @token_expiration.setter\n def token_expiration(self, token_expiration):\n \"\"\"Sets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_expiration: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_expiration = token_expiration\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n\n @property\n def password_salt_size(self):\n \"\"\"Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_salt_size\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other,\n OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties\n ):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-5": "# coding: utf-8\n\n\"\"\"\n Adobe Experience Manager OSGI config (AEM) API\n\n Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501\n\n OpenAPI spec version: 1.0.0-pre.0\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'token_expiration': 'ConfigNodePropertyString',\n 'token_length': 'ConfigNodePropertyString',\n 'token_refresh': 'ConfigNodePropertyBoolean',\n 'token_cleanup_threshold': 'ConfigNodePropertyInteger',\n 'password_hash_algorithm': 'ConfigNodePropertyString',\n 'password_hash_iterations': 'ConfigNodePropertyInteger',\n 'password_salt_size': 'ConfigNodePropertyInteger'\n }\n\n attribute_map = {\n 'token_expiration': 'tokenExpiration',\n 'token_length': 'tokenLength',\n 'token_refresh': 'tokenRefresh',\n 'token_cleanup_threshold': 'tokenCleanupThreshold',\n 'password_hash_algorithm': 'passwordHashAlgorithm',\n 'password_hash_iterations': 'passwordHashIterations',\n 'password_salt_size': 'passwordSaltSize'\n }\n\n def __init__(self, token_expiration=None, token_length=None, token_refresh=None, token_cleanup_threshold=None, password_hash_algorithm=None, password_hash_iterations=None, password_salt_size=None): # noqa: E501\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._token_expiration = None\n self._token_length = None\n self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n 
self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n\n @token_expiration.setter\n def token_expiration(self, token_expiration):\n \"\"\"Sets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_expiration: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n\n self._token_expiration = token_expiration\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n\n self._password_hash_iterations = password_hash_iterations\n\n @property\n def password_salt_size(self):\n \"\"\"Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_salt_size\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "step-ids": [ 12, 18, 19, 22, 25 ] }
[ 12, 18, 19, 22, 25 ]
# coding=utf-8 # pylint: disable=too-many-lines # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import sys from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union from .. import _serialization if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from .. import models as _models JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object class AnswersFromTextOptions(_serialization.Model): """The question and text record parameters to answer. All required parameters must be populated in order to send to Azure. :ivar question: User question to query against the given text records. Required. :vartype question: str :ivar text_documents: Text records to be searched for given question. Required. :vartype text_documents: list[~azure.ai.language.questionanswering.models.TextDocument] :ivar language: Language of the text records. This is BCP-47 representation of a language. For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as default. :vartype language: str """ _validation = { "question": {"required": True}, "text_documents": {"required": True}, } _attribute_map = { "question": {"key": "question", "type": "str"}, "text_documents": {"key": "records", "type": "[TextDocument]"}, "language": {"key": "language", "type": "str"}, } def __init__( self, *, question: str, text_documents: List["_models.TextDocument"], language: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword question: User question to query against the given text records. Required. :paramtype question: str :keyword text_documents: Text records to be searched for given question. Required. :paramtype text_documents: list[~azure.ai.language.questionanswering.models.TextDocument] :keyword language: Language of the text records. This is BCP-47 representation of a language. For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as default. :paramtype language: str """ super().__init__(**kwargs) self.question = question self.text_documents = text_documents self.language = language class AnswersFromTextResult(_serialization.Model): """Represents the answer results. :ivar answers: Represents the answer results. :vartype answers: list[~azure.ai.language.questionanswering.models.TextAnswer] """ _attribute_map = { "answers": {"key": "answers", "type": "[TextAnswer]"}, } def __init__(self, *, answers: Optional[List["_models.TextAnswer"]] = None, **kwargs: Any) -> None: """ :keyword answers: Represents the answer results. :paramtype answers: list[~azure.ai.language.questionanswering.models.TextAnswer] """ super().__init__(**kwargs) self.answers = answers class AnswersOptions(_serialization.Model): """Parameters to query a knowledge base. :ivar qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over question. :vartype qna_id: int :ivar question: User question to query against the knowledge base. 
:vartype question: str :ivar top: Max number of answers to be returned for the question. :vartype top: int :ivar user_id: Unique identifier for the user. :vartype user_id: str :ivar confidence_threshold: Minimum threshold score for answers, value ranges from 0 to 1. :vartype confidence_threshold: float :ivar answer_context: Context object with previous QnA's information. :vartype answer_context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerContext :ivar ranker_kind: Type of ranker to be used. :vartype ranker_kind: str :ivar filters: Filter QnAs based on given metadata list and knowledge base sources. :vartype filters: ~azure.ai.language.questionanswering.models.QueryFilters :ivar short_answer_options: To configure Answer span prediction feature. :vartype short_answer_options: ~azure.ai.language.questionanswering.models.ShortAnswerOptions :ivar include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources. :vartype include_unstructured_sources: bool """ _validation = { "confidence_threshold": {"maximum": 1, "minimum": 0}, } _attribute_map = { "qna_id": {"key": "qnaId", "type": "int"}, "question": {"key": "question", "type": "str"}, "top": {"key": "top", "type": "int"}, "user_id": {"key": "userId", "type": "str"}, "confidence_threshold": {"key": "confidenceScoreThreshold", "type": "float"}, "answer_context": {"key": "context", "type": "KnowledgeBaseAnswerContext"}, "ranker_kind": {"key": "rankerType", "type": "str"}, "filters": {"key": "filters", "type": "QueryFilters"}, "short_answer_options": {"key": "answerSpanRequest", "type": "ShortAnswerOptions"}, "include_unstructured_sources": {"key": "includeUnstructuredSources", "type": "bool"}, } def __init__( self, *, qna_id: Optional[int] = None, question: Optional[str] = None, top: Optional[int] = None, user_id: Optional[str] = None, confidence_threshold: Optional[float] = None, answer_context: Optional["_models.KnowledgeBaseAnswerContext"] = None, ranker_kind: Optional[str] = None, filters: Optional["_models.QueryFilters"] = None, short_answer_options: Optional["_models.ShortAnswerOptions"] = None, include_unstructured_sources: Optional[bool] = None, **kwargs: Any ) -> None: """ :keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over question. :paramtype qna_id: int :keyword question: User question to query against the knowledge base. :paramtype question: str :keyword top: Max number of answers to be returned for the question. :paramtype top: int :keyword user_id: Unique identifier for the user. :paramtype user_id: str :keyword confidence_threshold: Minimum threshold score for answers, value ranges from 0 to 1. :paramtype confidence_threshold: float :keyword answer_context: Context object with previous QnA's information. :paramtype answer_context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerContext :keyword ranker_kind: Type of ranker to be used. :paramtype ranker_kind: str :keyword filters: Filter QnAs based on given metadata list and knowledge base sources. :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters :keyword short_answer_options: To configure Answer span prediction feature. :paramtype short_answer_options: ~azure.ai.language.questionanswering.models.ShortAnswerOptions :keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources. 
:paramtype include_unstructured_sources: bool """ super().__init__(**kwargs) self.qna_id = qna_id self.question = question self.top = top self.user_id = user_id self.confidence_threshold = confidence_threshold self.answer_context = answer_context self.ranker_kind = ranker_kind self.filters = filters self.short_answer_options = short_answer_options self.include_unstructured_sources = include_unstructured_sources class AnswerSpan(_serialization.Model): """Answer span object of QnA. :ivar text: Predicted text of answer span. :vartype text: str :ivar confidence: Predicted score of answer span, value ranges from 0 to 1. :vartype confidence: float :ivar offset: The answer span offset from the start of answer. :vartype offset: int :ivar length: The length of the answer span. :vartype length: int """ _validation = { "confidence": {"maximum": 1, "minimum": 0}, } _attribute_map = { "text": {"key": "text", "type": "str"}, "confidence": {"key": "confidenceScore", "type": "float"}, "offset": {"key": "offset", "type": "int"}, "length": {"key": "length", "type": "int"}, } def __init__( self, *, text: Optional[str] = None, confidence: Optional[float] = None, offset: Optional[int] = None, length: Optional[int] = None, **kwargs: Any ) -> None: """ :keyword text: Predicted text of answer span. :paramtype text: str :keyword confidence: Predicted score of answer span, value ranges from 0 to 1. :paramtype confidence: float :keyword offset: The answer span offset from the start of answer. :paramtype offset: int :keyword length: The length of the answer span. :paramtype length: int """ super().__init__(**kwargs) self.text = text self.confidence = confidence self.offset = offset self.length = length class AnswersResult(_serialization.Model): """Represents List of Question Answers. :ivar answers: Represents Answer Result list. :vartype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer] """ _attribute_map = { "answers": {"key": "answers", "type": "[KnowledgeBaseAnswer]"}, } def __init__(self, *, answers: Optional[List["_models.KnowledgeBaseAnswer"]] = None, **kwargs: Any) -> None: """ :keyword answers: Represents Answer Result list. :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer] """ super().__init__(**kwargs) self.answers = answers class Error(_serialization.Model): """The error object. All required parameters must be populated in order to send to Azure. :ivar code: One of a server-defined set of error codes. Required. Known values are: "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", "ProjectNotFound", "OperationNotFound", "AzureCognitiveSearchNotFound", "AzureCognitiveSearchIndexNotFound", "TooManyRequests", "AzureCognitiveSearchThrottling", "AzureCognitiveSearchIndexLimitReached", "InternalServerError", and "ServiceUnavailable". :vartype code: str or ~azure.ai.language.questionanswering.models.ErrorCode :ivar message: A human-readable representation of the error. Required. :vartype message: str :ivar target: The target of the error. :vartype target: str :ivar details: An array of details about specific errors that led to this reported error. :vartype details: list[~azure.ai.language.questionanswering.models.Error] :ivar innererror: An object containing more specific information than the current object about the error. 
:vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel """ _validation = { "code": {"required": True}, "message": {"required": True}, } _attribute_map = { "code": {"key": "code", "type": "str"}, "message": {"key": "message", "type": "str"}, "target": {"key": "target", "type": "str"}, "details": {"key": "details", "type": "[Error]"}, "innererror": {"key": "innererror", "type": "InnerErrorModel"}, } def __init__( self, *, code: Union[str, "_models.ErrorCode"], message: str, target: Optional[str] = None, details: Optional[List["_models.Error"]] = None, innererror: Optional["_models.InnerErrorModel"] = None, **kwargs: Any ) -> None: """ :keyword code: One of a server-defined set of error codes. Required. Known values are: "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", "ProjectNotFound", "OperationNotFound", "AzureCognitiveSearchNotFound", "AzureCognitiveSearchIndexNotFound", "TooManyRequests", "AzureCognitiveSearchThrottling", "AzureCognitiveSearchIndexLimitReached", "InternalServerError", and "ServiceUnavailable". :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode :keyword message: A human-readable representation of the error. Required. :paramtype message: str :keyword target: The target of the error. :paramtype target: str :keyword details: An array of details about specific errors that led to this reported error. :paramtype details: list[~azure.ai.language.questionanswering.models.Error] :keyword innererror: An object containing more specific information than the current object about the error. :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel """ super().__init__(**kwargs) self.code = code self.message = message self.target = target self.details = details self.innererror = innererror class ErrorResponse(_serialization.Model): """Error response. :ivar error: The error object. :vartype error: ~azure.ai.language.questionanswering.models.Error """ _attribute_map = { "error": {"key": "error", "type": "Error"}, } def __init__(self, *, error: Optional["_models.Error"] = None, **kwargs: Any) -> None: """ :keyword error: The error object. :paramtype error: ~azure.ai.language.questionanswering.models.Error """ super().__init__(**kwargs) self.error = error class InnerErrorModel(_serialization.Model): """An object containing more specific information about the error. As per Microsoft One API guidelines - https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses. All required parameters must be populated in order to send to Azure. :ivar code: One of a server-defined set of error codes. Required. Known values are: "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound", "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", and "ExtractionFailure". :vartype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode :ivar message: Error message. Required. :vartype message: str :ivar details: Error details. :vartype details: dict[str, str] :ivar target: Error target. :vartype target: str :ivar innererror: An object containing more specific information than the current object about the error. 
:vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel """ _validation = { "code": {"required": True}, "message": {"required": True}, } _attribute_map = { "code": {"key": "code", "type": "str"}, "message": {"key": "message", "type": "str"}, "details": {"key": "details", "type": "{str}"}, "target": {"key": "target", "type": "str"}, "innererror": {"key": "innererror", "type": "InnerErrorModel"}, } def __init__( self, *, code: Union[str, "_models.InnerErrorCode"], message: str, details: Optional[Dict[str, str]] = None, target: Optional[str] = None, innererror: Optional["_models.InnerErrorModel"] = None, **kwargs: Any ) -> None: """ :keyword code: One of a server-defined set of error codes. Required. Known values are: "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound", "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", and "ExtractionFailure". :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode :keyword message: Error message. Required. :paramtype message: str :keyword details: Error details. :paramtype details: dict[str, str] :keyword target: Error target. :paramtype target: str :keyword innererror: An object containing more specific information than the current object about the error. :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel """ super().__init__(**kwargs) self.code = code self.message = message self.details = details self.target = target self.innererror = innererror class KnowledgeBaseAnswer(_serialization.Model): """Represents knowledge base answer. :ivar questions: List of questions associated with the answer. :vartype questions: list[str] :ivar answer: Answer text. :vartype answer: str :ivar confidence: Answer confidence score, value ranges from 0 to 1. :vartype confidence: float :ivar qna_id: ID of the QnA result. :vartype qna_id: int :ivar source: Source of QnA result. :vartype source: str :ivar metadata: Metadata associated with the answer, useful to categorize or filter question answers. :vartype metadata: dict[str, str] :ivar dialog: Dialog associated with Answer. :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog :ivar short_answer: Answer span object of QnA with respect to user's question. :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan """ _validation = { "confidence": {"maximum": 1, "minimum": 0}, } _attribute_map = { "questions": {"key": "questions", "type": "[str]"}, "answer": {"key": "answer", "type": "str"}, "confidence": {"key": "confidenceScore", "type": "float"}, "qna_id": {"key": "id", "type": "int"}, "source": {"key": "source", "type": "str"}, "metadata": {"key": "metadata", "type": "{str}"}, "dialog": {"key": "dialog", "type": "KnowledgeBaseAnswerDialog"}, "short_answer": {"key": "answerSpan", "type": "AnswerSpan"}, } def __init__( self, *, questions: Optional[List[str]] = None, answer: Optional[str] = None, confidence: Optional[float] = None, qna_id: Optional[int] = None, source: Optional[str] = None, metadata: Optional[Dict[str, str]] = None, dialog: Optional["_models.KnowledgeBaseAnswerDialog"] = None, short_answer: Optional["_models.AnswerSpan"] = None, **kwargs: Any ) -> None: """ :keyword questions: List of questions associated with the answer. :paramtype questions: list[str] :keyword answer: Answer text. :paramtype answer: str :keyword confidence: Answer confidence score, value ranges from 0 to 1. :paramtype confidence: float :keyword qna_id: ID of the QnA result. 
:paramtype qna_id: int :keyword source: Source of QnA result. :paramtype source: str :keyword metadata: Metadata associated with the answer, useful to categorize or filter question answers. :paramtype metadata: dict[str, str] :keyword dialog: Dialog associated with Answer. :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog :keyword short_answer: Answer span object of QnA with respect to user's question. :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan """ super().__init__(**kwargs) self.questions = questions self.answer = answer self.confidence = confidence self.qna_id = qna_id self.source = source self.metadata = metadata self.dialog = dialog self.short_answer = short_answer class KnowledgeBaseAnswerContext(_serialization.Model): """Context object with previous QnA's information. All required parameters must be populated in order to send to Azure. :ivar previous_qna_id: Previous turn top answer result QnA ID. Required. :vartype previous_qna_id: int :ivar previous_question: Previous user query. :vartype previous_question: str """ _validation = { "previous_qna_id": {"required": True}, } _attribute_map = { "previous_qna_id": {"key": "previousQnaId", "type": "int"}, "previous_question": {"key": "previousUserQuery", "type": "str"}, } def __init__(self, *, previous_qna_id: int, previous_question: Optional[str] = None, **kwargs: Any) -> None: """ :keyword previous_qna_id: Previous turn top answer result QnA ID. Required. :paramtype previous_qna_id: int :keyword previous_question: Previous user query. :paramtype previous_question: str """ super().__init__(**kwargs) self.previous_qna_id = previous_qna_id self.previous_question = previous_question class KnowledgeBaseAnswerDialog(_serialization.Model): """Dialog associated with Answer. :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If true, do not include this QnA as search result for queries without context; otherwise, if false, ignores context and includes this QnA in search result. :vartype is_context_only: bool :ivar prompts: List of prompts associated with the answer. :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt] """ _validation = { "prompts": {"max_items": 20, "min_items": 0}, } _attribute_map = { "is_context_only": {"key": "isContextOnly", "type": "bool"}, "prompts": {"key": "prompts", "type": "[KnowledgeBaseAnswerPrompt]"}, } def __init__( self, *, is_context_only: Optional[bool] = None, prompts: Optional[List["_models.KnowledgeBaseAnswerPrompt"]] = None, **kwargs: Any ) -> None: """ :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not. If true, do not include this QnA as search result for queries without context; otherwise, if false, ignores context and includes this QnA in search result. :paramtype is_context_only: bool :keyword prompts: List of prompts associated with the answer. :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt] """ super().__init__(**kwargs) self.is_context_only = is_context_only self.prompts = prompts class KnowledgeBaseAnswerPrompt(_serialization.Model): """Prompt for an answer. :ivar display_order: Index of the prompt - used in ordering of the prompts. :vartype display_order: int :ivar qna_id: QnA ID corresponding to the prompt. :vartype qna_id: int :ivar display_text: Text displayed to represent a follow up question prompt. 
:vartype display_text: str """ _validation = { "display_text": {"max_length": 200}, } _attribute_map = { "display_order": {"key": "displayOrder", "type": "int"}, "qna_id": {"key": "qnaId", "type": "int"}, "display_text": {"key": "displayText", "type": "str"}, } def __init__( self, *, display_order: Optional[int] = None, qna_id: Optional[int] = None, display_text: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword display_order: Index of the prompt - used in ordering of the prompts. :paramtype display_order: int :keyword qna_id: QnA ID corresponding to the prompt. :paramtype qna_id: int :keyword display_text: Text displayed to represent a follow up question prompt. :paramtype display_text: str """ super().__init__(**kwargs) self.display_order = display_order self.qna_id = qna_id self.display_text = display_text class MetadataFilter(_serialization.Model): """Find QnAs that are associated with the given list of metadata. :ivar metadata: :vartype metadata: list[JSON] :ivar logical_operation: Operation used to join metadata filters. :vartype logical_operation: str """ _attribute_map = { "metadata": {"key": "metadata", "type": "[object]"}, "logical_operation": {"key": "logicalOperation", "type": "str"}, } def __init__( self, *, metadata: Optional[List[JSON]] = None, logical_operation: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword metadata: :paramtype metadata: list[JSON] :keyword logical_operation: Operation used to join metadata filters. :paramtype logical_operation: str """ super().__init__(**kwargs) self.metadata = metadata self.logical_operation = logical_operation class QueryFilters(_serialization.Model): """filters over knowledge base. :ivar metadata_filter: Find QnAs that are associated with the given list of metadata. :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter :ivar source_filter: Find QnAs that are associated with any of the given list of sources in knowledge base. :vartype source_filter: list[str] :ivar logical_operation: Logical operation used to join metadata filter with source filter. :vartype logical_operation: str """ _attribute_map = { "metadata_filter": {"key": "metadataFilter", "type": "MetadataFilter"}, "source_filter": {"key": "sourceFilter", "type": "[str]"}, "logical_operation": {"key": "logicalOperation", "type": "str"}, } def __init__( self, *, metadata_filter: Optional["_models.MetadataFilter"] = None, source_filter: Optional[List[str]] = None, logical_operation: Optional[str] = None, **kwargs: Any ) -> None: """ :keyword metadata_filter: Find QnAs that are associated with the given list of metadata. :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter :keyword source_filter: Find QnAs that are associated with any of the given list of sources in knowledge base. :paramtype source_filter: list[str] :keyword logical_operation: Logical operation used to join metadata filter with source filter. :paramtype logical_operation: str """ super().__init__(**kwargs) self.metadata_filter = metadata_filter self.source_filter = source_filter self.logical_operation = logical_operation class ShortAnswerOptions(_serialization.Model): """To configure Answer span prediction feature. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar enable: Enable or disable Answer Span prediction. Required. Default value is True. 
:vartype enable: bool :ivar confidence_threshold: Minimum threshold score required to include an answer span, value ranges from 0 to 1. :vartype confidence_threshold: float :ivar top: Number of Top answers to be considered for span prediction from 1 to 10. :vartype top: int """ _validation = { "enable": {"required": True, "constant": True}, "confidence_threshold": {"maximum": 1, "minimum": 0}, "top": {"maximum": 10, "minimum": 1}, } _attribute_map = { "enable": {"key": "enable", "type": "bool"}, "confidence_threshold": {"key": "confidenceScoreThreshold", "type": "float"}, "top": {"key": "topAnswersWithSpan", "type": "int"}, } enable = True def __init__( self, *, confidence_threshold: Optional[float] = None, top: Optional[int] = None, **kwargs: Any ) -> None: """ :keyword confidence_threshold: Minimum threshold score required to include an answer span, value ranges from 0 to 1. :paramtype confidence_threshold: float :keyword top: Number of Top answers to be considered for span prediction from 1 to 10. :paramtype top: int """ super().__init__(**kwargs) self.confidence_threshold = confidence_threshold self.top = top class TextAnswer(_serialization.Model): """Represents answer result. :ivar answer: Answer. :vartype answer: str :ivar confidence: answer confidence score, value ranges from 0 to 1. :vartype confidence: float :ivar id: record ID. :vartype id: str :ivar short_answer: Answer span object with respect to user's question. :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan :ivar offset: The sentence offset from the start of the document. :vartype offset: int :ivar length: The length of the sentence. :vartype length: int """ _validation = { "confidence": {"maximum": 1, "minimum": 0}, } _attribute_map = { "answer": {"key": "answer", "type": "str"}, "confidence": {"key": "confidenceScore", "type": "float"}, "id": {"key": "id", "type": "str"}, "short_answer": {"key": "answerSpan", "type": "AnswerSpan"}, "offset": {"key": "offset", "type": "int"}, "length": {"key": "length", "type": "int"}, } def __init__( self, *, answer: Optional[str] = None, confidence: Optional[float] = None, id: Optional[str] = None, # pylint: disable=redefined-builtin short_answer: Optional["_models.AnswerSpan"] = None, offset: Optional[int] = None, length: Optional[int] = None, **kwargs: Any ) -> None: """ :keyword answer: Answer. :paramtype answer: str :keyword confidence: answer confidence score, value ranges from 0 to 1. :paramtype confidence: float :keyword id: record ID. :paramtype id: str :keyword short_answer: Answer span object with respect to user's question. :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan :keyword offset: The sentence offset from the start of the document. :paramtype offset: int :keyword length: The length of the sentence. :paramtype length: int """ super().__init__(**kwargs) self.answer = answer self.confidence = confidence self.id = id self.short_answer = short_answer self.offset = offset self.length = length class TextDocument(_serialization.Model): """Represent input text record to be queried. All required parameters must be populated in order to send to Azure. :ivar id: Unique identifier for the text record. Required. :vartype id: str :ivar text: Text contents of the record. Required. 
:vartype text: str """ _validation = { "id": {"required": True}, "text": {"required": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "text": {"key": "text", "type": "str"}, } def __init__(self, *, id: str, text: str, **kwargs: Any) -> None: # pylint: disable=redefined-builtin """ :keyword id: Unique identifier for the text record. Required. :paramtype id: str :keyword text: Text contents of the record. Required. :paramtype text: str """ super().__init__(**kwargs) self.id = id self.text = text
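# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the generated
# module above). It only exercises constructors defined in this file; the
# client call that would consume these models is omitted, and every metadata
# key, source name, ID, and question below is a hypothetical placeholder.
# ---------------------------------------------------------------------------
def _example_usage() -> None:
    """Hypothetical helper that composes a few of the models defined above."""
    # Request-side models: restrict the search and carry the previous turn.
    filters = QueryFilters(
        metadata_filter=MetadataFilter(
            metadata=[{"key": "category", "value": "troubleshooting"}],  # hypothetical pair
            logical_operation="AND",
        ),
        source_filter=["surface-user-guide.pdf"],  # hypothetical source
        logical_operation="AND",
    )
    context = KnowledgeBaseAnswerContext(
        previous_qna_id=27,  # top answer ID from the previous turn
        previous_question="How long does charging take?",
    )
    span_options = ShortAnswerOptions(confidence_threshold=0.5, top=1)
    records = [
        TextDocument(id="1", text="Hold the power button for five seconds to restart the device."),
        TextDocument(id="2", text="Charging from empty usually takes two to four hours."),
    ]

    # Response-side models: a result shaped the way the service would return it.
    answer = KnowledgeBaseAnswer(
        answer="Hold the power button for five seconds.",
        confidence=0.92,
        qna_id=27,
        short_answer=AnswerSpan(text="five seconds", confidence=0.88, offset=26, length=12),
    )
    result = AnswersResult(answers=[answer])
    for item in result.answers or []:
        print(item.qna_id, item.confidence, item.answer)

    # Keep the request-side objects referenced so linters don't flag them.
    print(filters.logical_operation, context.previous_qna_id, span_options.top, len(records))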
normal
{ "blob_id": "fb258521fdfded0062cbe30651268bf5410d3384", "index": 9864, "step-1": "<mask token>\n\n\nclass KnowledgeBaseAnswer(_serialization.Model):\n \"\"\"Represents knowledge base answer.\n\n :ivar questions: List of questions associated with the answer.\n :vartype questions: list[str]\n :ivar answer: Answer text.\n :vartype answer: str\n :ivar confidence: Answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar qna_id: ID of the QnA result.\n :vartype qna_id: int\n :ivar source: Source of QnA result.\n :vartype source: str\n :ivar metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :vartype metadata: dict[str, str]\n :ivar dialog: Dialog associated with Answer.\n :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :ivar short_answer: Answer span object of QnA with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'questions': {'key': 'questions', 'type': '[str]'},\n 'answer': {'key': 'answer', 'type': 'str'}, 'confidence': {'key':\n 'confidenceScore', 'type': 'float'}, 'qna_id': {'key': 'id', 'type':\n 'int'}, 'source': {'key': 'source', 'type': 'str'}, 'metadata': {\n 'key': 'metadata', 'type': '{str}'}, 'dialog': {'key': 'dialog',\n 'type': 'KnowledgeBaseAnswerDialog'}, 'short_answer': {'key':\n 'answerSpan', 'type': 'AnswerSpan'}}\n\n def __init__(self, *, questions: Optional[List[str]]=None, answer:\n Optional[str]=None, confidence: Optional[float]=None, qna_id:\n Optional[int]=None, source: Optional[str]=None, metadata: Optional[\n Dict[str, str]]=None, dialog: Optional[\n '_models.KnowledgeBaseAnswerDialog']=None, short_answer: Optional[\n '_models.AnswerSpan']=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword questions: List of questions associated with the answer.\n :paramtype questions: list[str]\n :keyword answer: Answer text.\n :paramtype answer: str\n :keyword confidence: Answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword qna_id: ID of the QnA result.\n :paramtype qna_id: int\n :keyword source: Source of QnA result.\n :paramtype source: str\n :keyword metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :paramtype metadata: dict[str, str]\n :keyword dialog: Dialog associated with Answer.\n :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :keyword short_answer: Answer span object of QnA with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n super().__init__(**kwargs)\n self.questions = questions\n self.answer = answer\n self.confidence = confidence\n self.qna_id = qna_id\n self.source = source\n self.metadata = metadata\n self.dialog = dialog\n self.short_answer = short_answer\n\n\nclass KnowledgeBaseAnswerContext(_serialization.Model):\n \"\"\"Context object with previous QnA's information.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar previous_qna_id: Previous turn top answer result QnA ID. 
Required.\n :vartype previous_qna_id: int\n :ivar previous_question: Previous user query.\n :vartype previous_question: str\n \"\"\"\n _validation = {'previous_qna_id': {'required': True}}\n _attribute_map = {'previous_qna_id': {'key': 'previousQnaId', 'type':\n 'int'}, 'previous_question': {'key': 'previousUserQuery', 'type':\n 'str'}}\n\n def __init__(self, *, previous_qna_id: int, previous_question: Optional\n [str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword previous_qna_id: Previous turn top answer result QnA ID. Required.\n :paramtype previous_qna_id: int\n :keyword previous_question: Previous user query.\n :paramtype previous_question: str\n \"\"\"\n super().__init__(**kwargs)\n self.previous_qna_id = previous_qna_id\n self.previous_question = previous_question\n\n\nclass KnowledgeBaseAnswerDialog(_serialization.Model):\n \"\"\"Dialog associated with Answer.\n\n :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If\n true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :vartype is_context_only: bool\n :ivar prompts: List of prompts associated with the answer.\n :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n _validation = {'prompts': {'max_items': 20, 'min_items': 0}}\n _attribute_map = {'is_context_only': {'key': 'isContextOnly', 'type':\n 'bool'}, 'prompts': {'key': 'prompts', 'type':\n '[KnowledgeBaseAnswerPrompt]'}}\n\n def __init__(self, *, is_context_only: Optional[bool]=None, prompts:\n Optional[List['_models.KnowledgeBaseAnswerPrompt']]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.\n If true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :paramtype is_context_only: bool\n :keyword prompts: List of prompts associated with the answer.\n :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n super().__init__(**kwargs)\n self.is_context_only = is_context_only\n self.prompts = prompts\n\n\nclass KnowledgeBaseAnswerPrompt(_serialization.Model):\n \"\"\"Prompt for an answer.\n\n :ivar display_order: Index of the prompt - used in ordering of the prompts.\n :vartype display_order: int\n :ivar qna_id: QnA ID corresponding to the prompt.\n :vartype qna_id: int\n :ivar display_text: Text displayed to represent a follow up question prompt.\n :vartype display_text: str\n \"\"\"\n _validation = {'display_text': {'max_length': 200}}\n _attribute_map = {'display_order': {'key': 'displayOrder', 'type':\n 'int'}, 'qna_id': {'key': 'qnaId', 'type': 'int'}, 'display_text':\n {'key': 'displayText', 'type': 'str'}}\n\n def __init__(self, *, display_order: Optional[int]=None, qna_id:\n Optional[int]=None, display_text: Optional[str]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword display_order: Index of the prompt - used in ordering of the prompts.\n :paramtype display_order: int\n :keyword qna_id: QnA ID corresponding to the prompt.\n :paramtype qna_id: int\n :keyword display_text: Text displayed to represent a follow up question prompt.\n :paramtype display_text: str\n \"\"\"\n super().__init__(**kwargs)\n self.display_order = display_order\n self.qna_id = qna_id\n self.display_text = display_text\n\n\nclass 
MetadataFilter(_serialization.Model):\n \"\"\"Find QnAs that are associated with the given list of metadata.\n\n :ivar metadata:\n :vartype metadata: list[JSON]\n :ivar logical_operation: Operation used to join metadata filters.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata': {'key': 'metadata', 'type': '[object]'},\n 'logical_operation': {'key': 'logicalOperation', 'type': 'str'}}\n\n def __init__(self, *, metadata: Optional[List[JSON]]=None,\n logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata:\n :paramtype metadata: list[JSON]\n :keyword logical_operation: Operation used to join metadata filters.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata = metadata\n self.logical_operation = logical_operation\n\n\nclass QueryFilters(_serialization.Model):\n \"\"\"filters over knowledge base.\n\n :ivar metadata_filter: Find QnAs that are associated with the given list of metadata.\n :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :ivar source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :vartype source_filter: list[str]\n :ivar logical_operation: Logical operation used to join metadata filter with source filter.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata_filter': {'key': 'metadataFilter', 'type':\n 'MetadataFilter'}, 'source_filter': {'key': 'sourceFilter', 'type':\n '[str]'}, 'logical_operation': {'key': 'logicalOperation', 'type':\n 'str'}}\n\n def __init__(self, *, metadata_filter: Optional[\n '_models.MetadataFilter']=None, source_filter: Optional[List[str]]=\n None, logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.\n :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :keyword source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :paramtype source_filter: list[str]\n :keyword logical_operation: Logical operation used to join metadata filter with source filter.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata_filter = metadata_filter\n self.source_filter = source_filter\n self.logical_operation = logical_operation\n\n\nclass ShortAnswerOptions(_serialization.Model):\n \"\"\"To configure Answer span prediction feature.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar enable: Enable or disable Answer Span prediction. Required. 
Default value is True.\n :vartype enable: bool\n :ivar confidence_threshold: Minimum threshold score required to include an answer span, value\n ranges from 0 to 1.\n :vartype confidence_threshold: float\n :ivar top: Number of Top answers to be considered for span prediction from 1 to 10.\n :vartype top: int\n \"\"\"\n _validation = {'enable': {'required': True, 'constant': True},\n 'confidence_threshold': {'maximum': 1, 'minimum': 0}, 'top': {\n 'maximum': 10, 'minimum': 1}}\n _attribute_map = {'enable': {'key': 'enable', 'type': 'bool'},\n 'confidence_threshold': {'key': 'confidenceScoreThreshold', 'type':\n 'float'}, 'top': {'key': 'topAnswersWithSpan', 'type': 'int'}}\n enable = True\n\n def __init__(self, *, confidence_threshold: Optional[float]=None, top:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword confidence_threshold: Minimum threshold score required to include an answer span,\n value ranges from 0 to 1.\n :paramtype confidence_threshold: float\n :keyword top: Number of Top answers to be considered for span prediction from 1 to 10.\n :paramtype top: int\n \"\"\"\n super().__init__(**kwargs)\n self.confidence_threshold = confidence_threshold\n self.top = top\n\n\nclass TextAnswer(_serialization.Model):\n \"\"\"Represents answer result.\n\n :ivar answer: Answer.\n :vartype answer: str\n :ivar confidence: answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar id: record ID.\n :vartype id: str\n :ivar short_answer: Answer span object with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :ivar offset: The sentence offset from the start of the document.\n :vartype offset: int\n :ivar length: The length of the sentence.\n :vartype length: int\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'answer': {'key': 'answer', 'type': 'str'},\n 'confidence': {'key': 'confidenceScore', 'type': 'float'}, 'id': {\n 'key': 'id', 'type': 'str'}, 'short_answer': {'key': 'answerSpan',\n 'type': 'AnswerSpan'}, 'offset': {'key': 'offset', 'type': 'int'},\n 'length': {'key': 'length', 'type': 'int'}}\n\n def __init__(self, *, answer: Optional[str]=None, confidence: Optional[\n float]=None, id: Optional[str]=None, short_answer: Optional[\n '_models.AnswerSpan']=None, offset: Optional[int]=None, length:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword answer: Answer.\n :paramtype answer: str\n :keyword confidence: answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword id: record ID.\n :paramtype id: str\n :keyword short_answer: Answer span object with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :keyword offset: The sentence offset from the start of the document.\n :paramtype offset: int\n :keyword length: The length of the sentence.\n :paramtype length: int\n \"\"\"\n super().__init__(**kwargs)\n self.answer = answer\n self.confidence = confidence\n self.id = id\n self.short_answer = short_answer\n self.offset = offset\n self.length = length\n\n\nclass TextDocument(_serialization.Model):\n \"\"\"Represent input text record to be queried.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Unique identifier for the text record. Required.\n :vartype id: str\n :ivar text: Text contents of the record. 
Required.\n :vartype text: str\n \"\"\"\n _validation = {'id': {'required': True}, 'text': {'required': True}}\n _attribute_map = {'id': {'key': 'id', 'type': 'str'}, 'text': {'key':\n 'text', 'type': 'str'}}\n\n def __init__(self, *, id: str, text: str, **kwargs: Any) ->None:\n \"\"\"\n :keyword id: Unique identifier for the text record. Required.\n :paramtype id: str\n :keyword text: Text contents of the record. Required.\n :paramtype text: str\n \"\"\"\n super().__init__(**kwargs)\n self.id = id\n self.text = text\n", "step-2": "<mask token>\n\n\nclass InnerErrorModel(_serialization.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass KnowledgeBaseAnswer(_serialization.Model):\n \"\"\"Represents knowledge base answer.\n\n :ivar questions: List of questions associated with the answer.\n :vartype questions: list[str]\n :ivar answer: Answer text.\n :vartype answer: str\n :ivar confidence: Answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar qna_id: ID of the QnA result.\n :vartype qna_id: int\n :ivar source: Source of QnA result.\n :vartype source: str\n :ivar metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :vartype metadata: dict[str, str]\n :ivar dialog: Dialog associated with Answer.\n :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :ivar short_answer: Answer span object of QnA with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'questions': {'key': 'questions', 'type': '[str]'},\n 'answer': {'key': 'answer', 'type': 'str'}, 'confidence': {'key':\n 'confidenceScore', 'type': 'float'}, 'qna_id': {'key': 'id', 'type':\n 'int'}, 'source': {'key': 'source', 'type': 'str'}, 'metadata': {\n 'key': 'metadata', 'type': '{str}'}, 'dialog': {'key': 'dialog',\n 'type': 'KnowledgeBaseAnswerDialog'}, 'short_answer': {'key':\n 'answerSpan', 'type': 'AnswerSpan'}}\n\n def __init__(self, *, questions: Optional[List[str]]=None, answer:\n Optional[str]=None, confidence: Optional[float]=None, qna_id:\n Optional[int]=None, source: Optional[str]=None, metadata: Optional[\n Dict[str, str]]=None, dialog: Optional[\n '_models.KnowledgeBaseAnswerDialog']=None, short_answer: Optional[\n '_models.AnswerSpan']=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword questions: List of questions associated with the answer.\n :paramtype questions: list[str]\n :keyword answer: Answer text.\n :paramtype answer: str\n :keyword confidence: Answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword qna_id: ID of the QnA result.\n :paramtype qna_id: int\n :keyword source: Source of QnA result.\n :paramtype source: str\n :keyword metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :paramtype metadata: dict[str, str]\n :keyword dialog: Dialog associated with Answer.\n :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :keyword short_answer: Answer span object of QnA with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n super().__init__(**kwargs)\n self.questions = questions\n self.answer = answer\n self.confidence = confidence\n self.qna_id = qna_id\n self.source = source\n self.metadata = metadata\n self.dialog = dialog\n self.short_answer = 
short_answer\n\n\nclass KnowledgeBaseAnswerContext(_serialization.Model):\n \"\"\"Context object with previous QnA's information.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar previous_qna_id: Previous turn top answer result QnA ID. Required.\n :vartype previous_qna_id: int\n :ivar previous_question: Previous user query.\n :vartype previous_question: str\n \"\"\"\n _validation = {'previous_qna_id': {'required': True}}\n _attribute_map = {'previous_qna_id': {'key': 'previousQnaId', 'type':\n 'int'}, 'previous_question': {'key': 'previousUserQuery', 'type':\n 'str'}}\n\n def __init__(self, *, previous_qna_id: int, previous_question: Optional\n [str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword previous_qna_id: Previous turn top answer result QnA ID. Required.\n :paramtype previous_qna_id: int\n :keyword previous_question: Previous user query.\n :paramtype previous_question: str\n \"\"\"\n super().__init__(**kwargs)\n self.previous_qna_id = previous_qna_id\n self.previous_question = previous_question\n\n\nclass KnowledgeBaseAnswerDialog(_serialization.Model):\n \"\"\"Dialog associated with Answer.\n\n :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If\n true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :vartype is_context_only: bool\n :ivar prompts: List of prompts associated with the answer.\n :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n _validation = {'prompts': {'max_items': 20, 'min_items': 0}}\n _attribute_map = {'is_context_only': {'key': 'isContextOnly', 'type':\n 'bool'}, 'prompts': {'key': 'prompts', 'type':\n '[KnowledgeBaseAnswerPrompt]'}}\n\n def __init__(self, *, is_context_only: Optional[bool]=None, prompts:\n Optional[List['_models.KnowledgeBaseAnswerPrompt']]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.\n If true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :paramtype is_context_only: bool\n :keyword prompts: List of prompts associated with the answer.\n :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n super().__init__(**kwargs)\n self.is_context_only = is_context_only\n self.prompts = prompts\n\n\nclass KnowledgeBaseAnswerPrompt(_serialization.Model):\n \"\"\"Prompt for an answer.\n\n :ivar display_order: Index of the prompt - used in ordering of the prompts.\n :vartype display_order: int\n :ivar qna_id: QnA ID corresponding to the prompt.\n :vartype qna_id: int\n :ivar display_text: Text displayed to represent a follow up question prompt.\n :vartype display_text: str\n \"\"\"\n _validation = {'display_text': {'max_length': 200}}\n _attribute_map = {'display_order': {'key': 'displayOrder', 'type':\n 'int'}, 'qna_id': {'key': 'qnaId', 'type': 'int'}, 'display_text':\n {'key': 'displayText', 'type': 'str'}}\n\n def __init__(self, *, display_order: Optional[int]=None, qna_id:\n Optional[int]=None, display_text: Optional[str]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword display_order: Index of the prompt - used in ordering of the prompts.\n :paramtype display_order: int\n :keyword qna_id: QnA ID corresponding to the prompt.\n :paramtype qna_id: int\n :keyword 
display_text: Text displayed to represent a follow up question prompt.\n :paramtype display_text: str\n \"\"\"\n super().__init__(**kwargs)\n self.display_order = display_order\n self.qna_id = qna_id\n self.display_text = display_text\n\n\nclass MetadataFilter(_serialization.Model):\n \"\"\"Find QnAs that are associated with the given list of metadata.\n\n :ivar metadata:\n :vartype metadata: list[JSON]\n :ivar logical_operation: Operation used to join metadata filters.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata': {'key': 'metadata', 'type': '[object]'},\n 'logical_operation': {'key': 'logicalOperation', 'type': 'str'}}\n\n def __init__(self, *, metadata: Optional[List[JSON]]=None,\n logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata:\n :paramtype metadata: list[JSON]\n :keyword logical_operation: Operation used to join metadata filters.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata = metadata\n self.logical_operation = logical_operation\n\n\nclass QueryFilters(_serialization.Model):\n \"\"\"filters over knowledge base.\n\n :ivar metadata_filter: Find QnAs that are associated with the given list of metadata.\n :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :ivar source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :vartype source_filter: list[str]\n :ivar logical_operation: Logical operation used to join metadata filter with source filter.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata_filter': {'key': 'metadataFilter', 'type':\n 'MetadataFilter'}, 'source_filter': {'key': 'sourceFilter', 'type':\n '[str]'}, 'logical_operation': {'key': 'logicalOperation', 'type':\n 'str'}}\n\n def __init__(self, *, metadata_filter: Optional[\n '_models.MetadataFilter']=None, source_filter: Optional[List[str]]=\n None, logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.\n :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :keyword source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :paramtype source_filter: list[str]\n :keyword logical_operation: Logical operation used to join metadata filter with source filter.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata_filter = metadata_filter\n self.source_filter = source_filter\n self.logical_operation = logical_operation\n\n\nclass ShortAnswerOptions(_serialization.Model):\n \"\"\"To configure Answer span prediction feature.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar enable: Enable or disable Answer Span prediction. Required. 
Default value is True.\n :vartype enable: bool\n :ivar confidence_threshold: Minimum threshold score required to include an answer span, value\n ranges from 0 to 1.\n :vartype confidence_threshold: float\n :ivar top: Number of Top answers to be considered for span prediction from 1 to 10.\n :vartype top: int\n \"\"\"\n _validation = {'enable': {'required': True, 'constant': True},\n 'confidence_threshold': {'maximum': 1, 'minimum': 0}, 'top': {\n 'maximum': 10, 'minimum': 1}}\n _attribute_map = {'enable': {'key': 'enable', 'type': 'bool'},\n 'confidence_threshold': {'key': 'confidenceScoreThreshold', 'type':\n 'float'}, 'top': {'key': 'topAnswersWithSpan', 'type': 'int'}}\n enable = True\n\n def __init__(self, *, confidence_threshold: Optional[float]=None, top:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword confidence_threshold: Minimum threshold score required to include an answer span,\n value ranges from 0 to 1.\n :paramtype confidence_threshold: float\n :keyword top: Number of Top answers to be considered for span prediction from 1 to 10.\n :paramtype top: int\n \"\"\"\n super().__init__(**kwargs)\n self.confidence_threshold = confidence_threshold\n self.top = top\n\n\nclass TextAnswer(_serialization.Model):\n \"\"\"Represents answer result.\n\n :ivar answer: Answer.\n :vartype answer: str\n :ivar confidence: answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar id: record ID.\n :vartype id: str\n :ivar short_answer: Answer span object with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :ivar offset: The sentence offset from the start of the document.\n :vartype offset: int\n :ivar length: The length of the sentence.\n :vartype length: int\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'answer': {'key': 'answer', 'type': 'str'},\n 'confidence': {'key': 'confidenceScore', 'type': 'float'}, 'id': {\n 'key': 'id', 'type': 'str'}, 'short_answer': {'key': 'answerSpan',\n 'type': 'AnswerSpan'}, 'offset': {'key': 'offset', 'type': 'int'},\n 'length': {'key': 'length', 'type': 'int'}}\n\n def __init__(self, *, answer: Optional[str]=None, confidence: Optional[\n float]=None, id: Optional[str]=None, short_answer: Optional[\n '_models.AnswerSpan']=None, offset: Optional[int]=None, length:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword answer: Answer.\n :paramtype answer: str\n :keyword confidence: answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword id: record ID.\n :paramtype id: str\n :keyword short_answer: Answer span object with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :keyword offset: The sentence offset from the start of the document.\n :paramtype offset: int\n :keyword length: The length of the sentence.\n :paramtype length: int\n \"\"\"\n super().__init__(**kwargs)\n self.answer = answer\n self.confidence = confidence\n self.id = id\n self.short_answer = short_answer\n self.offset = offset\n self.length = length\n\n\nclass TextDocument(_serialization.Model):\n \"\"\"Represent input text record to be queried.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Unique identifier for the text record. Required.\n :vartype id: str\n :ivar text: Text contents of the record. 
Required.\n :vartype text: str\n \"\"\"\n _validation = {'id': {'required': True}, 'text': {'required': True}}\n _attribute_map = {'id': {'key': 'id', 'type': 'str'}, 'text': {'key':\n 'text', 'type': 'str'}}\n\n def __init__(self, *, id: str, text: str, **kwargs: Any) ->None:\n \"\"\"\n :keyword id: Unique identifier for the text record. Required.\n :paramtype id: str\n :keyword text: Text contents of the record. Required.\n :paramtype text: str\n \"\"\"\n super().__init__(**kwargs)\n self.id = id\n self.text = text\n", "step-3": "<mask token>\n\n\nclass AnswersResult(_serialization.Model):\n <mask token>\n _attribute_map = {'answers': {'key': 'answers', 'type':\n '[KnowledgeBaseAnswer]'}}\n\n def __init__(self, *, answers: Optional[List[\n '_models.KnowledgeBaseAnswer']]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword answers: Represents Answer Result list.\n :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]\n \"\"\"\n super().__init__(**kwargs)\n self.answers = answers\n\n\nclass Error(_serialization.Model):\n \"\"\"The error object.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidArgument\", \"Unauthorized\", \"Forbidden\", \"NotFound\",\n \"ProjectNotFound\", \"OperationNotFound\", \"AzureCognitiveSearchNotFound\",\n \"AzureCognitiveSearchIndexNotFound\", \"TooManyRequests\", \"AzureCognitiveSearchThrottling\",\n \"AzureCognitiveSearchIndexLimitReached\", \"InternalServerError\", and \"ServiceUnavailable\".\n :vartype code: str or ~azure.ai.language.questionanswering.models.ErrorCode\n :ivar message: A human-readable representation of the error. Required.\n :vartype message: str\n :ivar target: The target of the error.\n :vartype target: str\n :ivar details: An array of details about specific errors that led to this reported error.\n :vartype details: list[~azure.ai.language.questionanswering.models.Error]\n :ivar innererror: An object containing more specific information than the current object about\n the error.\n :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n _validation = {'code': {'required': True}, 'message': {'required': True}}\n _attribute_map = {'code': {'key': 'code', 'type': 'str'}, 'message': {\n 'key': 'message', 'type': 'str'}, 'target': {'key': 'target',\n 'type': 'str'}, 'details': {'key': 'details', 'type': '[Error]'},\n 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}}\n\n def __init__(self, *, code: Union[str, '_models.ErrorCode'], message:\n str, target: Optional[str]=None, details: Optional[List[\n '_models.Error']]=None, innererror: Optional[\n '_models.InnerErrorModel']=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidArgument\", \"Unauthorized\", \"Forbidden\", \"NotFound\",\n \"ProjectNotFound\", \"OperationNotFound\", \"AzureCognitiveSearchNotFound\",\n \"AzureCognitiveSearchIndexNotFound\", \"TooManyRequests\", \"AzureCognitiveSearchThrottling\",\n \"AzureCognitiveSearchIndexLimitReached\", \"InternalServerError\", and \"ServiceUnavailable\".\n :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode\n :keyword message: A human-readable representation of the error. 
Required.\n :paramtype message: str\n :keyword target: The target of the error.\n :paramtype target: str\n :keyword details: An array of details about specific errors that led to this reported error.\n :paramtype details: list[~azure.ai.language.questionanswering.models.Error]\n :keyword innererror: An object containing more specific information than the current object\n about the error.\n :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n super().__init__(**kwargs)\n self.code = code\n self.message = message\n self.target = target\n self.details = details\n self.innererror = innererror\n\n\nclass ErrorResponse(_serialization.Model):\n \"\"\"Error response.\n\n :ivar error: The error object.\n :vartype error: ~azure.ai.language.questionanswering.models.Error\n \"\"\"\n _attribute_map = {'error': {'key': 'error', 'type': 'Error'}}\n\n def __init__(self, *, error: Optional['_models.Error']=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword error: The error object.\n :paramtype error: ~azure.ai.language.questionanswering.models.Error\n \"\"\"\n super().__init__(**kwargs)\n self.error = error\n\n\nclass InnerErrorModel(_serialization.Model):\n \"\"\"An object containing more specific information about the error. As per Microsoft One API\n guidelines -\n https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidParameterValue\", \"KnowledgeBaseNotFound\",\n \"AzureCognitiveSearchNotFound\", \"AzureCognitiveSearchThrottling\", and \"ExtractionFailure\".\n :vartype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode\n :ivar message: Error message. Required.\n :vartype message: str\n :ivar details: Error details.\n :vartype details: dict[str, str]\n :ivar target: Error target.\n :vartype target: str\n :ivar innererror: An object containing more specific information than the current object about\n the error.\n :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n _validation = {'code': {'required': True}, 'message': {'required': True}}\n _attribute_map = {'code': {'key': 'code', 'type': 'str'}, 'message': {\n 'key': 'message', 'type': 'str'}, 'details': {'key': 'details',\n 'type': '{str}'}, 'target': {'key': 'target', 'type': 'str'},\n 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}}\n\n def __init__(self, *, code: Union[str, '_models.InnerErrorCode'],\n message: str, details: Optional[Dict[str, str]]=None, target:\n Optional[str]=None, innererror: Optional['_models.InnerErrorModel']\n =None, **kwargs: Any) ->None:\n \"\"\"\n :keyword code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidParameterValue\", \"KnowledgeBaseNotFound\",\n \"AzureCognitiveSearchNotFound\", \"AzureCognitiveSearchThrottling\", and \"ExtractionFailure\".\n :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode\n :keyword message: Error message. 
Required.\n :paramtype message: str\n :keyword details: Error details.\n :paramtype details: dict[str, str]\n :keyword target: Error target.\n :paramtype target: str\n :keyword innererror: An object containing more specific information than the current object\n about the error.\n :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n super().__init__(**kwargs)\n self.code = code\n self.message = message\n self.details = details\n self.target = target\n self.innererror = innererror\n\n\nclass KnowledgeBaseAnswer(_serialization.Model):\n \"\"\"Represents knowledge base answer.\n\n :ivar questions: List of questions associated with the answer.\n :vartype questions: list[str]\n :ivar answer: Answer text.\n :vartype answer: str\n :ivar confidence: Answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar qna_id: ID of the QnA result.\n :vartype qna_id: int\n :ivar source: Source of QnA result.\n :vartype source: str\n :ivar metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :vartype metadata: dict[str, str]\n :ivar dialog: Dialog associated with Answer.\n :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :ivar short_answer: Answer span object of QnA with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'questions': {'key': 'questions', 'type': '[str]'},\n 'answer': {'key': 'answer', 'type': 'str'}, 'confidence': {'key':\n 'confidenceScore', 'type': 'float'}, 'qna_id': {'key': 'id', 'type':\n 'int'}, 'source': {'key': 'source', 'type': 'str'}, 'metadata': {\n 'key': 'metadata', 'type': '{str}'}, 'dialog': {'key': 'dialog',\n 'type': 'KnowledgeBaseAnswerDialog'}, 'short_answer': {'key':\n 'answerSpan', 'type': 'AnswerSpan'}}\n\n def __init__(self, *, questions: Optional[List[str]]=None, answer:\n Optional[str]=None, confidence: Optional[float]=None, qna_id:\n Optional[int]=None, source: Optional[str]=None, metadata: Optional[\n Dict[str, str]]=None, dialog: Optional[\n '_models.KnowledgeBaseAnswerDialog']=None, short_answer: Optional[\n '_models.AnswerSpan']=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword questions: List of questions associated with the answer.\n :paramtype questions: list[str]\n :keyword answer: Answer text.\n :paramtype answer: str\n :keyword confidence: Answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword qna_id: ID of the QnA result.\n :paramtype qna_id: int\n :keyword source: Source of QnA result.\n :paramtype source: str\n :keyword metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :paramtype metadata: dict[str, str]\n :keyword dialog: Dialog associated with Answer.\n :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :keyword short_answer: Answer span object of QnA with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n super().__init__(**kwargs)\n self.questions = questions\n self.answer = answer\n self.confidence = confidence\n self.qna_id = qna_id\n self.source = source\n self.metadata = metadata\n self.dialog = dialog\n self.short_answer = short_answer\n\n\nclass KnowledgeBaseAnswerContext(_serialization.Model):\n \"\"\"Context object with previous QnA's information.\n\n 
All required parameters must be populated in order to send to Azure.\n\n :ivar previous_qna_id: Previous turn top answer result QnA ID. Required.\n :vartype previous_qna_id: int\n :ivar previous_question: Previous user query.\n :vartype previous_question: str\n \"\"\"\n _validation = {'previous_qna_id': {'required': True}}\n _attribute_map = {'previous_qna_id': {'key': 'previousQnaId', 'type':\n 'int'}, 'previous_question': {'key': 'previousUserQuery', 'type':\n 'str'}}\n\n def __init__(self, *, previous_qna_id: int, previous_question: Optional\n [str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword previous_qna_id: Previous turn top answer result QnA ID. Required.\n :paramtype previous_qna_id: int\n :keyword previous_question: Previous user query.\n :paramtype previous_question: str\n \"\"\"\n super().__init__(**kwargs)\n self.previous_qna_id = previous_qna_id\n self.previous_question = previous_question\n\n\nclass KnowledgeBaseAnswerDialog(_serialization.Model):\n \"\"\"Dialog associated with Answer.\n\n :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If\n true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :vartype is_context_only: bool\n :ivar prompts: List of prompts associated with the answer.\n :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n _validation = {'prompts': {'max_items': 20, 'min_items': 0}}\n _attribute_map = {'is_context_only': {'key': 'isContextOnly', 'type':\n 'bool'}, 'prompts': {'key': 'prompts', 'type':\n '[KnowledgeBaseAnswerPrompt]'}}\n\n def __init__(self, *, is_context_only: Optional[bool]=None, prompts:\n Optional[List['_models.KnowledgeBaseAnswerPrompt']]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.\n If true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :paramtype is_context_only: bool\n :keyword prompts: List of prompts associated with the answer.\n :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n super().__init__(**kwargs)\n self.is_context_only = is_context_only\n self.prompts = prompts\n\n\nclass KnowledgeBaseAnswerPrompt(_serialization.Model):\n \"\"\"Prompt for an answer.\n\n :ivar display_order: Index of the prompt - used in ordering of the prompts.\n :vartype display_order: int\n :ivar qna_id: QnA ID corresponding to the prompt.\n :vartype qna_id: int\n :ivar display_text: Text displayed to represent a follow up question prompt.\n :vartype display_text: str\n \"\"\"\n _validation = {'display_text': {'max_length': 200}}\n _attribute_map = {'display_order': {'key': 'displayOrder', 'type':\n 'int'}, 'qna_id': {'key': 'qnaId', 'type': 'int'}, 'display_text':\n {'key': 'displayText', 'type': 'str'}}\n\n def __init__(self, *, display_order: Optional[int]=None, qna_id:\n Optional[int]=None, display_text: Optional[str]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword display_order: Index of the prompt - used in ordering of the prompts.\n :paramtype display_order: int\n :keyword qna_id: QnA ID corresponding to the prompt.\n :paramtype qna_id: int\n :keyword display_text: Text displayed to represent a follow up question prompt.\n :paramtype display_text: str\n \"\"\"\n super().__init__(**kwargs)\n 
self.display_order = display_order\n self.qna_id = qna_id\n self.display_text = display_text\n\n\nclass MetadataFilter(_serialization.Model):\n \"\"\"Find QnAs that are associated with the given list of metadata.\n\n :ivar metadata:\n :vartype metadata: list[JSON]\n :ivar logical_operation: Operation used to join metadata filters.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata': {'key': 'metadata', 'type': '[object]'},\n 'logical_operation': {'key': 'logicalOperation', 'type': 'str'}}\n\n def __init__(self, *, metadata: Optional[List[JSON]]=None,\n logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata:\n :paramtype metadata: list[JSON]\n :keyword logical_operation: Operation used to join metadata filters.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata = metadata\n self.logical_operation = logical_operation\n\n\nclass QueryFilters(_serialization.Model):\n \"\"\"filters over knowledge base.\n\n :ivar metadata_filter: Find QnAs that are associated with the given list of metadata.\n :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :ivar source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :vartype source_filter: list[str]\n :ivar logical_operation: Logical operation used to join metadata filter with source filter.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata_filter': {'key': 'metadataFilter', 'type':\n 'MetadataFilter'}, 'source_filter': {'key': 'sourceFilter', 'type':\n '[str]'}, 'logical_operation': {'key': 'logicalOperation', 'type':\n 'str'}}\n\n def __init__(self, *, metadata_filter: Optional[\n '_models.MetadataFilter']=None, source_filter: Optional[List[str]]=\n None, logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.\n :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :keyword source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :paramtype source_filter: list[str]\n :keyword logical_operation: Logical operation used to join metadata filter with source filter.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata_filter = metadata_filter\n self.source_filter = source_filter\n self.logical_operation = logical_operation\n\n\nclass ShortAnswerOptions(_serialization.Model):\n \"\"\"To configure Answer span prediction feature.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar enable: Enable or disable Answer Span prediction. Required. 
Default value is True.\n :vartype enable: bool\n :ivar confidence_threshold: Minimum threshold score required to include an answer span, value\n ranges from 0 to 1.\n :vartype confidence_threshold: float\n :ivar top: Number of Top answers to be considered for span prediction from 1 to 10.\n :vartype top: int\n \"\"\"\n _validation = {'enable': {'required': True, 'constant': True},\n 'confidence_threshold': {'maximum': 1, 'minimum': 0}, 'top': {\n 'maximum': 10, 'minimum': 1}}\n _attribute_map = {'enable': {'key': 'enable', 'type': 'bool'},\n 'confidence_threshold': {'key': 'confidenceScoreThreshold', 'type':\n 'float'}, 'top': {'key': 'topAnswersWithSpan', 'type': 'int'}}\n enable = True\n\n def __init__(self, *, confidence_threshold: Optional[float]=None, top:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword confidence_threshold: Minimum threshold score required to include an answer span,\n value ranges from 0 to 1.\n :paramtype confidence_threshold: float\n :keyword top: Number of Top answers to be considered for span prediction from 1 to 10.\n :paramtype top: int\n \"\"\"\n super().__init__(**kwargs)\n self.confidence_threshold = confidence_threshold\n self.top = top\n\n\nclass TextAnswer(_serialization.Model):\n \"\"\"Represents answer result.\n\n :ivar answer: Answer.\n :vartype answer: str\n :ivar confidence: answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar id: record ID.\n :vartype id: str\n :ivar short_answer: Answer span object with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :ivar offset: The sentence offset from the start of the document.\n :vartype offset: int\n :ivar length: The length of the sentence.\n :vartype length: int\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'answer': {'key': 'answer', 'type': 'str'},\n 'confidence': {'key': 'confidenceScore', 'type': 'float'}, 'id': {\n 'key': 'id', 'type': 'str'}, 'short_answer': {'key': 'answerSpan',\n 'type': 'AnswerSpan'}, 'offset': {'key': 'offset', 'type': 'int'},\n 'length': {'key': 'length', 'type': 'int'}}\n\n def __init__(self, *, answer: Optional[str]=None, confidence: Optional[\n float]=None, id: Optional[str]=None, short_answer: Optional[\n '_models.AnswerSpan']=None, offset: Optional[int]=None, length:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword answer: Answer.\n :paramtype answer: str\n :keyword confidence: answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword id: record ID.\n :paramtype id: str\n :keyword short_answer: Answer span object with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :keyword offset: The sentence offset from the start of the document.\n :paramtype offset: int\n :keyword length: The length of the sentence.\n :paramtype length: int\n \"\"\"\n super().__init__(**kwargs)\n self.answer = answer\n self.confidence = confidence\n self.id = id\n self.short_answer = short_answer\n self.offset = offset\n self.length = length\n\n\nclass TextDocument(_serialization.Model):\n \"\"\"Represent input text record to be queried.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Unique identifier for the text record. Required.\n :vartype id: str\n :ivar text: Text contents of the record. 
Required.\n :vartype text: str\n \"\"\"\n _validation = {'id': {'required': True}, 'text': {'required': True}}\n _attribute_map = {'id': {'key': 'id', 'type': 'str'}, 'text': {'key':\n 'text', 'type': 'str'}}\n\n def __init__(self, *, id: str, text: str, **kwargs: Any) ->None:\n \"\"\"\n :keyword id: Unique identifier for the text record. Required.\n :paramtype id: str\n :keyword text: Text contents of the record. Required.\n :paramtype text: str\n \"\"\"\n super().__init__(**kwargs)\n self.id = id\n self.text = text\n", "step-4": "<mask token>\n\n\nclass AnswerSpan(_serialization.Model):\n \"\"\"Answer span object of QnA.\n\n :ivar text: Predicted text of answer span.\n :vartype text: str\n :ivar confidence: Predicted score of answer span, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar offset: The answer span offset from the start of answer.\n :vartype offset: int\n :ivar length: The length of the answer span.\n :vartype length: int\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'text': {'key': 'text', 'type': 'str'}, 'confidence':\n {'key': 'confidenceScore', 'type': 'float'}, 'offset': {'key':\n 'offset', 'type': 'int'}, 'length': {'key': 'length', 'type': 'int'}}\n\n def __init__(self, *, text: Optional[str]=None, confidence: Optional[\n float]=None, offset: Optional[int]=None, length: Optional[int]=None,\n **kwargs: Any) ->None:\n \"\"\"\n :keyword text: Predicted text of answer span.\n :paramtype text: str\n :keyword confidence: Predicted score of answer span, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword offset: The answer span offset from the start of answer.\n :paramtype offset: int\n :keyword length: The length of the answer span.\n :paramtype length: int\n \"\"\"\n super().__init__(**kwargs)\n self.text = text\n self.confidence = confidence\n self.offset = offset\n self.length = length\n\n\nclass AnswersResult(_serialization.Model):\n \"\"\"Represents List of Question Answers.\n\n :ivar answers: Represents Answer Result list.\n :vartype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]\n \"\"\"\n _attribute_map = {'answers': {'key': 'answers', 'type':\n '[KnowledgeBaseAnswer]'}}\n\n def __init__(self, *, answers: Optional[List[\n '_models.KnowledgeBaseAnswer']]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword answers: Represents Answer Result list.\n :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]\n \"\"\"\n super().__init__(**kwargs)\n self.answers = answers\n\n\nclass Error(_serialization.Model):\n \"\"\"The error object.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidArgument\", \"Unauthorized\", \"Forbidden\", \"NotFound\",\n \"ProjectNotFound\", \"OperationNotFound\", \"AzureCognitiveSearchNotFound\",\n \"AzureCognitiveSearchIndexNotFound\", \"TooManyRequests\", \"AzureCognitiveSearchThrottling\",\n \"AzureCognitiveSearchIndexLimitReached\", \"InternalServerError\", and \"ServiceUnavailable\".\n :vartype code: str or ~azure.ai.language.questionanswering.models.ErrorCode\n :ivar message: A human-readable representation of the error. 
Required.\n :vartype message: str\n :ivar target: The target of the error.\n :vartype target: str\n :ivar details: An array of details about specific errors that led to this reported error.\n :vartype details: list[~azure.ai.language.questionanswering.models.Error]\n :ivar innererror: An object containing more specific information than the current object about\n the error.\n :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n _validation = {'code': {'required': True}, 'message': {'required': True}}\n _attribute_map = {'code': {'key': 'code', 'type': 'str'}, 'message': {\n 'key': 'message', 'type': 'str'}, 'target': {'key': 'target',\n 'type': 'str'}, 'details': {'key': 'details', 'type': '[Error]'},\n 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}}\n\n def __init__(self, *, code: Union[str, '_models.ErrorCode'], message:\n str, target: Optional[str]=None, details: Optional[List[\n '_models.Error']]=None, innererror: Optional[\n '_models.InnerErrorModel']=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidArgument\", \"Unauthorized\", \"Forbidden\", \"NotFound\",\n \"ProjectNotFound\", \"OperationNotFound\", \"AzureCognitiveSearchNotFound\",\n \"AzureCognitiveSearchIndexNotFound\", \"TooManyRequests\", \"AzureCognitiveSearchThrottling\",\n \"AzureCognitiveSearchIndexLimitReached\", \"InternalServerError\", and \"ServiceUnavailable\".\n :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode\n :keyword message: A human-readable representation of the error. Required.\n :paramtype message: str\n :keyword target: The target of the error.\n :paramtype target: str\n :keyword details: An array of details about specific errors that led to this reported error.\n :paramtype details: list[~azure.ai.language.questionanswering.models.Error]\n :keyword innererror: An object containing more specific information than the current object\n about the error.\n :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n super().__init__(**kwargs)\n self.code = code\n self.message = message\n self.target = target\n self.details = details\n self.innererror = innererror\n\n\nclass ErrorResponse(_serialization.Model):\n \"\"\"Error response.\n\n :ivar error: The error object.\n :vartype error: ~azure.ai.language.questionanswering.models.Error\n \"\"\"\n _attribute_map = {'error': {'key': 'error', 'type': 'Error'}}\n\n def __init__(self, *, error: Optional['_models.Error']=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword error: The error object.\n :paramtype error: ~azure.ai.language.questionanswering.models.Error\n \"\"\"\n super().__init__(**kwargs)\n self.error = error\n\n\nclass InnerErrorModel(_serialization.Model):\n \"\"\"An object containing more specific information about the error. As per Microsoft One API\n guidelines -\n https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidParameterValue\", \"KnowledgeBaseNotFound\",\n \"AzureCognitiveSearchNotFound\", \"AzureCognitiveSearchThrottling\", and \"ExtractionFailure\".\n :vartype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode\n :ivar message: Error message. 
Required.\n :vartype message: str\n :ivar details: Error details.\n :vartype details: dict[str, str]\n :ivar target: Error target.\n :vartype target: str\n :ivar innererror: An object containing more specific information than the current object about\n the error.\n :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n _validation = {'code': {'required': True}, 'message': {'required': True}}\n _attribute_map = {'code': {'key': 'code', 'type': 'str'}, 'message': {\n 'key': 'message', 'type': 'str'}, 'details': {'key': 'details',\n 'type': '{str}'}, 'target': {'key': 'target', 'type': 'str'},\n 'innererror': {'key': 'innererror', 'type': 'InnerErrorModel'}}\n\n def __init__(self, *, code: Union[str, '_models.InnerErrorCode'],\n message: str, details: Optional[Dict[str, str]]=None, target:\n Optional[str]=None, innererror: Optional['_models.InnerErrorModel']\n =None, **kwargs: Any) ->None:\n \"\"\"\n :keyword code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidParameterValue\", \"KnowledgeBaseNotFound\",\n \"AzureCognitiveSearchNotFound\", \"AzureCognitiveSearchThrottling\", and \"ExtractionFailure\".\n :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode\n :keyword message: Error message. Required.\n :paramtype message: str\n :keyword details: Error details.\n :paramtype details: dict[str, str]\n :keyword target: Error target.\n :paramtype target: str\n :keyword innererror: An object containing more specific information than the current object\n about the error.\n :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n super().__init__(**kwargs)\n self.code = code\n self.message = message\n self.details = details\n self.target = target\n self.innererror = innererror\n\n\nclass KnowledgeBaseAnswer(_serialization.Model):\n \"\"\"Represents knowledge base answer.\n\n :ivar questions: List of questions associated with the answer.\n :vartype questions: list[str]\n :ivar answer: Answer text.\n :vartype answer: str\n :ivar confidence: Answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar qna_id: ID of the QnA result.\n :vartype qna_id: int\n :ivar source: Source of QnA result.\n :vartype source: str\n :ivar metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :vartype metadata: dict[str, str]\n :ivar dialog: Dialog associated with Answer.\n :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :ivar short_answer: Answer span object of QnA with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n _validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'questions': {'key': 'questions', 'type': '[str]'},\n 'answer': {'key': 'answer', 'type': 'str'}, 'confidence': {'key':\n 'confidenceScore', 'type': 'float'}, 'qna_id': {'key': 'id', 'type':\n 'int'}, 'source': {'key': 'source', 'type': 'str'}, 'metadata': {\n 'key': 'metadata', 'type': '{str}'}, 'dialog': {'key': 'dialog',\n 'type': 'KnowledgeBaseAnswerDialog'}, 'short_answer': {'key':\n 'answerSpan', 'type': 'AnswerSpan'}}\n\n def __init__(self, *, questions: Optional[List[str]]=None, answer:\n Optional[str]=None, confidence: Optional[float]=None, qna_id:\n Optional[int]=None, source: Optional[str]=None, metadata: Optional[\n Dict[str, str]]=None, dialog: Optional[\n 
'_models.KnowledgeBaseAnswerDialog']=None, short_answer: Optional[\n '_models.AnswerSpan']=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword questions: List of questions associated with the answer.\n :paramtype questions: list[str]\n :keyword answer: Answer text.\n :paramtype answer: str\n :keyword confidence: Answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword qna_id: ID of the QnA result.\n :paramtype qna_id: int\n :keyword source: Source of QnA result.\n :paramtype source: str\n :keyword metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :paramtype metadata: dict[str, str]\n :keyword dialog: Dialog associated with Answer.\n :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :keyword short_answer: Answer span object of QnA with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n super().__init__(**kwargs)\n self.questions = questions\n self.answer = answer\n self.confidence = confidence\n self.qna_id = qna_id\n self.source = source\n self.metadata = metadata\n self.dialog = dialog\n self.short_answer = short_answer\n\n\nclass KnowledgeBaseAnswerContext(_serialization.Model):\n \"\"\"Context object with previous QnA's information.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar previous_qna_id: Previous turn top answer result QnA ID. Required.\n :vartype previous_qna_id: int\n :ivar previous_question: Previous user query.\n :vartype previous_question: str\n \"\"\"\n _validation = {'previous_qna_id': {'required': True}}\n _attribute_map = {'previous_qna_id': {'key': 'previousQnaId', 'type':\n 'int'}, 'previous_question': {'key': 'previousUserQuery', 'type':\n 'str'}}\n\n def __init__(self, *, previous_qna_id: int, previous_question: Optional\n [str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword previous_qna_id: Previous turn top answer result QnA ID. Required.\n :paramtype previous_qna_id: int\n :keyword previous_question: Previous user query.\n :paramtype previous_question: str\n \"\"\"\n super().__init__(**kwargs)\n self.previous_qna_id = previous_qna_id\n self.previous_question = previous_question\n\n\nclass KnowledgeBaseAnswerDialog(_serialization.Model):\n \"\"\"Dialog associated with Answer.\n\n :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. 
If\n true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :vartype is_context_only: bool\n :ivar prompts: List of prompts associated with the answer.\n :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n _validation = {'prompts': {'max_items': 20, 'min_items': 0}}\n _attribute_map = {'is_context_only': {'key': 'isContextOnly', 'type':\n 'bool'}, 'prompts': {'key': 'prompts', 'type':\n '[KnowledgeBaseAnswerPrompt]'}}\n\n def __init__(self, *, is_context_only: Optional[bool]=None, prompts:\n Optional[List['_models.KnowledgeBaseAnswerPrompt']]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.\n If true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :paramtype is_context_only: bool\n :keyword prompts: List of prompts associated with the answer.\n :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n super().__init__(**kwargs)\n self.is_context_only = is_context_only\n self.prompts = prompts\n\n\nclass KnowledgeBaseAnswerPrompt(_serialization.Model):\n \"\"\"Prompt for an answer.\n\n :ivar display_order: Index of the prompt - used in ordering of the prompts.\n :vartype display_order: int\n :ivar qna_id: QnA ID corresponding to the prompt.\n :vartype qna_id: int\n :ivar display_text: Text displayed to represent a follow up question prompt.\n :vartype display_text: str\n \"\"\"\n _validation = {'display_text': {'max_length': 200}}\n _attribute_map = {'display_order': {'key': 'displayOrder', 'type':\n 'int'}, 'qna_id': {'key': 'qnaId', 'type': 'int'}, 'display_text':\n {'key': 'displayText', 'type': 'str'}}\n\n def __init__(self, *, display_order: Optional[int]=None, qna_id:\n Optional[int]=None, display_text: Optional[str]=None, **kwargs: Any\n ) ->None:\n \"\"\"\n :keyword display_order: Index of the prompt - used in ordering of the prompts.\n :paramtype display_order: int\n :keyword qna_id: QnA ID corresponding to the prompt.\n :paramtype qna_id: int\n :keyword display_text: Text displayed to represent a follow up question prompt.\n :paramtype display_text: str\n \"\"\"\n super().__init__(**kwargs)\n self.display_order = display_order\n self.qna_id = qna_id\n self.display_text = display_text\n\n\nclass MetadataFilter(_serialization.Model):\n \"\"\"Find QnAs that are associated with the given list of metadata.\n\n :ivar metadata:\n :vartype metadata: list[JSON]\n :ivar logical_operation: Operation used to join metadata filters.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata': {'key': 'metadata', 'type': '[object]'},\n 'logical_operation': {'key': 'logicalOperation', 'type': 'str'}}\n\n def __init__(self, *, metadata: Optional[List[JSON]]=None,\n logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata:\n :paramtype metadata: list[JSON]\n :keyword logical_operation: Operation used to join metadata filters.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata = metadata\n self.logical_operation = logical_operation\n\n\nclass QueryFilters(_serialization.Model):\n \"\"\"filters over knowledge base.\n\n :ivar metadata_filter: Find QnAs that are associated with the given list of metadata.\n :vartype 
metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :ivar source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :vartype source_filter: list[str]\n :ivar logical_operation: Logical operation used to join metadata filter with source filter.\n :vartype logical_operation: str\n \"\"\"\n _attribute_map = {'metadata_filter': {'key': 'metadataFilter', 'type':\n 'MetadataFilter'}, 'source_filter': {'key': 'sourceFilter', 'type':\n '[str]'}, 'logical_operation': {'key': 'logicalOperation', 'type':\n 'str'}}\n\n def __init__(self, *, metadata_filter: Optional[\n '_models.MetadataFilter']=None, source_filter: Optional[List[str]]=\n None, logical_operation: Optional[str]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.\n :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :keyword source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :paramtype source_filter: list[str]\n :keyword logical_operation: Logical operation used to join metadata filter with source filter.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata_filter = metadata_filter\n self.source_filter = source_filter\n self.logical_operation = logical_operation\n\n\nclass ShortAnswerOptions(_serialization.Model):\n \"\"\"To configure Answer span prediction feature.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar enable: Enable or disable Answer Span prediction. Required. Default value is True.\n :vartype enable: bool\n :ivar confidence_threshold: Minimum threshold score required to include an answer span, value\n ranges from 0 to 1.\n :vartype confidence_threshold: float\n :ivar top: Number of Top answers to be considered for span prediction from 1 to 10.\n :vartype top: int\n \"\"\"\n _validation = {'enable': {'required': True, 'constant': True},\n 'confidence_threshold': {'maximum': 1, 'minimum': 0}, 'top': {\n 'maximum': 10, 'minimum': 1}}\n _attribute_map = {'enable': {'key': 'enable', 'type': 'bool'},\n 'confidence_threshold': {'key': 'confidenceScoreThreshold', 'type':\n 'float'}, 'top': {'key': 'topAnswersWithSpan', 'type': 'int'}}\n enable = True\n\n def __init__(self, *, confidence_threshold: Optional[float]=None, top:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword confidence_threshold: Minimum threshold score required to include an answer span,\n value ranges from 0 to 1.\n :paramtype confidence_threshold: float\n :keyword top: Number of Top answers to be considered for span prediction from 1 to 10.\n :paramtype top: int\n \"\"\"\n super().__init__(**kwargs)\n self.confidence_threshold = confidence_threshold\n self.top = top\n\n\nclass TextAnswer(_serialization.Model):\n \"\"\"Represents answer result.\n\n :ivar answer: Answer.\n :vartype answer: str\n :ivar confidence: answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar id: record ID.\n :vartype id: str\n :ivar short_answer: Answer span object with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :ivar offset: The sentence offset from the start of the document.\n :vartype offset: int\n :ivar length: The length of the sentence.\n :vartype length: int\n \"\"\"\n 
_validation = {'confidence': {'maximum': 1, 'minimum': 0}}\n _attribute_map = {'answer': {'key': 'answer', 'type': 'str'},\n 'confidence': {'key': 'confidenceScore', 'type': 'float'}, 'id': {\n 'key': 'id', 'type': 'str'}, 'short_answer': {'key': 'answerSpan',\n 'type': 'AnswerSpan'}, 'offset': {'key': 'offset', 'type': 'int'},\n 'length': {'key': 'length', 'type': 'int'}}\n\n def __init__(self, *, answer: Optional[str]=None, confidence: Optional[\n float]=None, id: Optional[str]=None, short_answer: Optional[\n '_models.AnswerSpan']=None, offset: Optional[int]=None, length:\n Optional[int]=None, **kwargs: Any) ->None:\n \"\"\"\n :keyword answer: Answer.\n :paramtype answer: str\n :keyword confidence: answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword id: record ID.\n :paramtype id: str\n :keyword short_answer: Answer span object with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :keyword offset: The sentence offset from the start of the document.\n :paramtype offset: int\n :keyword length: The length of the sentence.\n :paramtype length: int\n \"\"\"\n super().__init__(**kwargs)\n self.answer = answer\n self.confidence = confidence\n self.id = id\n self.short_answer = short_answer\n self.offset = offset\n self.length = length\n\n\nclass TextDocument(_serialization.Model):\n \"\"\"Represent input text record to be queried.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Unique identifier for the text record. Required.\n :vartype id: str\n :ivar text: Text contents of the record. Required.\n :vartype text: str\n \"\"\"\n _validation = {'id': {'required': True}, 'text': {'required': True}}\n _attribute_map = {'id': {'key': 'id', 'type': 'str'}, 'text': {'key':\n 'text', 'type': 'str'}}\n\n def __init__(self, *, id: str, text: str, **kwargs: Any) ->None:\n \"\"\"\n :keyword id: Unique identifier for the text record. Required.\n :paramtype id: str\n :keyword text: Text contents of the record. Required.\n :paramtype text: str\n \"\"\"\n super().__init__(**kwargs)\n self.id = id\n self.text = text\n", "step-5": "# coding=utf-8\n# pylint: disable=too-many-lines\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nimport sys\nfrom typing import Any, Dict, List, Optional, TYPE_CHECKING, Union\n\nfrom .. import _serialization\n\nif sys.version_info >= (3, 9):\n from collections.abc import MutableMapping\nelse:\n from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from .. import models as _models\nJSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object\n\n\nclass AnswersFromTextOptions(_serialization.Model):\n \"\"\"The question and text record parameters to answer.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar question: User question to query against the given text records. Required.\n :vartype question: str\n :ivar text_documents: Text records to be searched for given question. 
Required.\n :vartype text_documents: list[~azure.ai.language.questionanswering.models.TextDocument]\n :ivar language: Language of the text records. This is BCP-47 representation of a language. For\n example, use \"en\" for English; \"es\" for Spanish etc. If not set, use \"en\" for English as\n default.\n :vartype language: str\n \"\"\"\n\n _validation = {\n \"question\": {\"required\": True},\n \"text_documents\": {\"required\": True},\n }\n\n _attribute_map = {\n \"question\": {\"key\": \"question\", \"type\": \"str\"},\n \"text_documents\": {\"key\": \"records\", \"type\": \"[TextDocument]\"},\n \"language\": {\"key\": \"language\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n question: str,\n text_documents: List[\"_models.TextDocument\"],\n language: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword question: User question to query against the given text records. Required.\n :paramtype question: str\n :keyword text_documents: Text records to be searched for given question. Required.\n :paramtype text_documents: list[~azure.ai.language.questionanswering.models.TextDocument]\n :keyword language: Language of the text records. This is BCP-47 representation of a language.\n For example, use \"en\" for English; \"es\" for Spanish etc. If not set, use \"en\" for English as\n default.\n :paramtype language: str\n \"\"\"\n super().__init__(**kwargs)\n self.question = question\n self.text_documents = text_documents\n self.language = language\n\n\nclass AnswersFromTextResult(_serialization.Model):\n \"\"\"Represents the answer results.\n\n :ivar answers: Represents the answer results.\n :vartype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]\n \"\"\"\n\n _attribute_map = {\n \"answers\": {\"key\": \"answers\", \"type\": \"[TextAnswer]\"},\n }\n\n def __init__(self, *, answers: Optional[List[\"_models.TextAnswer\"]] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword answers: Represents the answer results.\n :paramtype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]\n \"\"\"\n super().__init__(**kwargs)\n self.answers = answers\n\n\nclass AnswersOptions(_serialization.Model):\n \"\"\"Parameters to query a knowledge base.\n\n :ivar qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over\n question.\n :vartype qna_id: int\n :ivar question: User question to query against the knowledge base.\n :vartype question: str\n :ivar top: Max number of answers to be returned for the question.\n :vartype top: int\n :ivar user_id: Unique identifier for the user.\n :vartype user_id: str\n :ivar confidence_threshold: Minimum threshold score for answers, value ranges from 0 to 1.\n :vartype confidence_threshold: float\n :ivar answer_context: Context object with previous QnA's information.\n :vartype answer_context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerContext\n :ivar ranker_kind: Type of ranker to be used.\n :vartype ranker_kind: str\n :ivar filters: Filter QnAs based on given metadata list and knowledge base sources.\n :vartype filters: ~azure.ai.language.questionanswering.models.QueryFilters\n :ivar short_answer_options: To configure Answer span prediction feature.\n :vartype short_answer_options: ~azure.ai.language.questionanswering.models.ShortAnswerOptions\n :ivar include_unstructured_sources: (Optional) Flag to enable Query over Unstructured Sources.\n :vartype include_unstructured_sources: bool\n \"\"\"\n\n _validation = {\n \"confidence_threshold\": {\"maximum\": 1, 
\"minimum\": 0},\n }\n\n _attribute_map = {\n \"qna_id\": {\"key\": \"qnaId\", \"type\": \"int\"},\n \"question\": {\"key\": \"question\", \"type\": \"str\"},\n \"top\": {\"key\": \"top\", \"type\": \"int\"},\n \"user_id\": {\"key\": \"userId\", \"type\": \"str\"},\n \"confidence_threshold\": {\"key\": \"confidenceScoreThreshold\", \"type\": \"float\"},\n \"answer_context\": {\"key\": \"context\", \"type\": \"KnowledgeBaseAnswerContext\"},\n \"ranker_kind\": {\"key\": \"rankerType\", \"type\": \"str\"},\n \"filters\": {\"key\": \"filters\", \"type\": \"QueryFilters\"},\n \"short_answer_options\": {\"key\": \"answerSpanRequest\", \"type\": \"ShortAnswerOptions\"},\n \"include_unstructured_sources\": {\"key\": \"includeUnstructuredSources\", \"type\": \"bool\"},\n }\n\n def __init__(\n self,\n *,\n qna_id: Optional[int] = None,\n question: Optional[str] = None,\n top: Optional[int] = None,\n user_id: Optional[str] = None,\n confidence_threshold: Optional[float] = None,\n answer_context: Optional[\"_models.KnowledgeBaseAnswerContext\"] = None,\n ranker_kind: Optional[str] = None,\n filters: Optional[\"_models.QueryFilters\"] = None,\n short_answer_options: Optional[\"_models.ShortAnswerOptions\"] = None,\n include_unstructured_sources: Optional[bool] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over\n question.\n :paramtype qna_id: int\n :keyword question: User question to query against the knowledge base.\n :paramtype question: str\n :keyword top: Max number of answers to be returned for the question.\n :paramtype top: int\n :keyword user_id: Unique identifier for the user.\n :paramtype user_id: str\n :keyword confidence_threshold: Minimum threshold score for answers, value ranges from 0 to 1.\n :paramtype confidence_threshold: float\n :keyword answer_context: Context object with previous QnA's information.\n :paramtype answer_context:\n ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerContext\n :keyword ranker_kind: Type of ranker to be used.\n :paramtype ranker_kind: str\n :keyword filters: Filter QnAs based on given metadata list and knowledge base sources.\n :paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters\n :keyword short_answer_options: To configure Answer span prediction feature.\n :paramtype short_answer_options: ~azure.ai.language.questionanswering.models.ShortAnswerOptions\n :keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured\n Sources.\n :paramtype include_unstructured_sources: bool\n \"\"\"\n super().__init__(**kwargs)\n self.qna_id = qna_id\n self.question = question\n self.top = top\n self.user_id = user_id\n self.confidence_threshold = confidence_threshold\n self.answer_context = answer_context\n self.ranker_kind = ranker_kind\n self.filters = filters\n self.short_answer_options = short_answer_options\n self.include_unstructured_sources = include_unstructured_sources\n\n\nclass AnswerSpan(_serialization.Model):\n \"\"\"Answer span object of QnA.\n\n :ivar text: Predicted text of answer span.\n :vartype text: str\n :ivar confidence: Predicted score of answer span, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar offset: The answer span offset from the start of answer.\n :vartype offset: int\n :ivar length: The length of the answer span.\n :vartype length: int\n \"\"\"\n\n _validation = {\n \"confidence\": {\"maximum\": 1, \"minimum\": 0},\n }\n\n _attribute_map = {\n \"text\": {\"key\": 
\"text\", \"type\": \"str\"},\n \"confidence\": {\"key\": \"confidenceScore\", \"type\": \"float\"},\n \"offset\": {\"key\": \"offset\", \"type\": \"int\"},\n \"length\": {\"key\": \"length\", \"type\": \"int\"},\n }\n\n def __init__(\n self,\n *,\n text: Optional[str] = None,\n confidence: Optional[float] = None,\n offset: Optional[int] = None,\n length: Optional[int] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword text: Predicted text of answer span.\n :paramtype text: str\n :keyword confidence: Predicted score of answer span, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword offset: The answer span offset from the start of answer.\n :paramtype offset: int\n :keyword length: The length of the answer span.\n :paramtype length: int\n \"\"\"\n super().__init__(**kwargs)\n self.text = text\n self.confidence = confidence\n self.offset = offset\n self.length = length\n\n\nclass AnswersResult(_serialization.Model):\n \"\"\"Represents List of Question Answers.\n\n :ivar answers: Represents Answer Result list.\n :vartype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]\n \"\"\"\n\n _attribute_map = {\n \"answers\": {\"key\": \"answers\", \"type\": \"[KnowledgeBaseAnswer]\"},\n }\n\n def __init__(self, *, answers: Optional[List[\"_models.KnowledgeBaseAnswer\"]] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword answers: Represents Answer Result list.\n :paramtype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]\n \"\"\"\n super().__init__(**kwargs)\n self.answers = answers\n\n\nclass Error(_serialization.Model):\n \"\"\"The error object.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidArgument\", \"Unauthorized\", \"Forbidden\", \"NotFound\",\n \"ProjectNotFound\", \"OperationNotFound\", \"AzureCognitiveSearchNotFound\",\n \"AzureCognitiveSearchIndexNotFound\", \"TooManyRequests\", \"AzureCognitiveSearchThrottling\",\n \"AzureCognitiveSearchIndexLimitReached\", \"InternalServerError\", and \"ServiceUnavailable\".\n :vartype code: str or ~azure.ai.language.questionanswering.models.ErrorCode\n :ivar message: A human-readable representation of the error. Required.\n :vartype message: str\n :ivar target: The target of the error.\n :vartype target: str\n :ivar details: An array of details about specific errors that led to this reported error.\n :vartype details: list[~azure.ai.language.questionanswering.models.Error]\n :ivar innererror: An object containing more specific information than the current object about\n the error.\n :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n\n _validation = {\n \"code\": {\"required\": True},\n \"message\": {\"required\": True},\n }\n\n _attribute_map = {\n \"code\": {\"key\": \"code\", \"type\": \"str\"},\n \"message\": {\"key\": \"message\", \"type\": \"str\"},\n \"target\": {\"key\": \"target\", \"type\": \"str\"},\n \"details\": {\"key\": \"details\", \"type\": \"[Error]\"},\n \"innererror\": {\"key\": \"innererror\", \"type\": \"InnerErrorModel\"},\n }\n\n def __init__(\n self,\n *,\n code: Union[str, \"_models.ErrorCode\"],\n message: str,\n target: Optional[str] = None,\n details: Optional[List[\"_models.Error\"]] = None,\n innererror: Optional[\"_models.InnerErrorModel\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword code: One of a server-defined set of error codes. Required. 
Known values are:\n \"InvalidRequest\", \"InvalidArgument\", \"Unauthorized\", \"Forbidden\", \"NotFound\",\n \"ProjectNotFound\", \"OperationNotFound\", \"AzureCognitiveSearchNotFound\",\n \"AzureCognitiveSearchIndexNotFound\", \"TooManyRequests\", \"AzureCognitiveSearchThrottling\",\n \"AzureCognitiveSearchIndexLimitReached\", \"InternalServerError\", and \"ServiceUnavailable\".\n :paramtype code: str or ~azure.ai.language.questionanswering.models.ErrorCode\n :keyword message: A human-readable representation of the error. Required.\n :paramtype message: str\n :keyword target: The target of the error.\n :paramtype target: str\n :keyword details: An array of details about specific errors that led to this reported error.\n :paramtype details: list[~azure.ai.language.questionanswering.models.Error]\n :keyword innererror: An object containing more specific information than the current object\n about the error.\n :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n super().__init__(**kwargs)\n self.code = code\n self.message = message\n self.target = target\n self.details = details\n self.innererror = innererror\n\n\nclass ErrorResponse(_serialization.Model):\n \"\"\"Error response.\n\n :ivar error: The error object.\n :vartype error: ~azure.ai.language.questionanswering.models.Error\n \"\"\"\n\n _attribute_map = {\n \"error\": {\"key\": \"error\", \"type\": \"Error\"},\n }\n\n def __init__(self, *, error: Optional[\"_models.Error\"] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword error: The error object.\n :paramtype error: ~azure.ai.language.questionanswering.models.Error\n \"\"\"\n super().__init__(**kwargs)\n self.error = error\n\n\nclass InnerErrorModel(_serialization.Model):\n \"\"\"An object containing more specific information about the error. As per Microsoft One API\n guidelines -\n https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar code: One of a server-defined set of error codes. Required. Known values are:\n \"InvalidRequest\", \"InvalidParameterValue\", \"KnowledgeBaseNotFound\",\n \"AzureCognitiveSearchNotFound\", \"AzureCognitiveSearchThrottling\", and \"ExtractionFailure\".\n :vartype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode\n :ivar message: Error message. Required.\n :vartype message: str\n :ivar details: Error details.\n :vartype details: dict[str, str]\n :ivar target: Error target.\n :vartype target: str\n :ivar innererror: An object containing more specific information than the current object about\n the error.\n :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n\n _validation = {\n \"code\": {\"required\": True},\n \"message\": {\"required\": True},\n }\n\n _attribute_map = {\n \"code\": {\"key\": \"code\", \"type\": \"str\"},\n \"message\": {\"key\": \"message\", \"type\": \"str\"},\n \"details\": {\"key\": \"details\", \"type\": \"{str}\"},\n \"target\": {\"key\": \"target\", \"type\": \"str\"},\n \"innererror\": {\"key\": \"innererror\", \"type\": \"InnerErrorModel\"},\n }\n\n def __init__(\n self,\n *,\n code: Union[str, \"_models.InnerErrorCode\"],\n message: str,\n details: Optional[Dict[str, str]] = None,\n target: Optional[str] = None,\n innererror: Optional[\"_models.InnerErrorModel\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword code: One of a server-defined set of error codes. Required. 
Known values are:\n \"InvalidRequest\", \"InvalidParameterValue\", \"KnowledgeBaseNotFound\",\n \"AzureCognitiveSearchNotFound\", \"AzureCognitiveSearchThrottling\", and \"ExtractionFailure\".\n :paramtype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode\n :keyword message: Error message. Required.\n :paramtype message: str\n :keyword details: Error details.\n :paramtype details: dict[str, str]\n :keyword target: Error target.\n :paramtype target: str\n :keyword innererror: An object containing more specific information than the current object\n about the error.\n :paramtype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel\n \"\"\"\n super().__init__(**kwargs)\n self.code = code\n self.message = message\n self.details = details\n self.target = target\n self.innererror = innererror\n\n\nclass KnowledgeBaseAnswer(_serialization.Model):\n \"\"\"Represents knowledge base answer.\n\n :ivar questions: List of questions associated with the answer.\n :vartype questions: list[str]\n :ivar answer: Answer text.\n :vartype answer: str\n :ivar confidence: Answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar qna_id: ID of the QnA result.\n :vartype qna_id: int\n :ivar source: Source of QnA result.\n :vartype source: str\n :ivar metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :vartype metadata: dict[str, str]\n :ivar dialog: Dialog associated with Answer.\n :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :ivar short_answer: Answer span object of QnA with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n\n _validation = {\n \"confidence\": {\"maximum\": 1, \"minimum\": 0},\n }\n\n _attribute_map = {\n \"questions\": {\"key\": \"questions\", \"type\": \"[str]\"},\n \"answer\": {\"key\": \"answer\", \"type\": \"str\"},\n \"confidence\": {\"key\": \"confidenceScore\", \"type\": \"float\"},\n \"qna_id\": {\"key\": \"id\", \"type\": \"int\"},\n \"source\": {\"key\": \"source\", \"type\": \"str\"},\n \"metadata\": {\"key\": \"metadata\", \"type\": \"{str}\"},\n \"dialog\": {\"key\": \"dialog\", \"type\": \"KnowledgeBaseAnswerDialog\"},\n \"short_answer\": {\"key\": \"answerSpan\", \"type\": \"AnswerSpan\"},\n }\n\n def __init__(\n self,\n *,\n questions: Optional[List[str]] = None,\n answer: Optional[str] = None,\n confidence: Optional[float] = None,\n qna_id: Optional[int] = None,\n source: Optional[str] = None,\n metadata: Optional[Dict[str, str]] = None,\n dialog: Optional[\"_models.KnowledgeBaseAnswerDialog\"] = None,\n short_answer: Optional[\"_models.AnswerSpan\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword questions: List of questions associated with the answer.\n :paramtype questions: list[str]\n :keyword answer: Answer text.\n :paramtype answer: str\n :keyword confidence: Answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword qna_id: ID of the QnA result.\n :paramtype qna_id: int\n :keyword source: Source of QnA result.\n :paramtype source: str\n :keyword metadata: Metadata associated with the answer, useful to categorize or filter question\n answers.\n :paramtype metadata: dict[str, str]\n :keyword dialog: Dialog associated with Answer.\n :paramtype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog\n :keyword short_answer: Answer span object of QnA with respect to user's question.\n 
:paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n \"\"\"\n super().__init__(**kwargs)\n self.questions = questions\n self.answer = answer\n self.confidence = confidence\n self.qna_id = qna_id\n self.source = source\n self.metadata = metadata\n self.dialog = dialog\n self.short_answer = short_answer\n\n\nclass KnowledgeBaseAnswerContext(_serialization.Model):\n \"\"\"Context object with previous QnA's information.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar previous_qna_id: Previous turn top answer result QnA ID. Required.\n :vartype previous_qna_id: int\n :ivar previous_question: Previous user query.\n :vartype previous_question: str\n \"\"\"\n\n _validation = {\n \"previous_qna_id\": {\"required\": True},\n }\n\n _attribute_map = {\n \"previous_qna_id\": {\"key\": \"previousQnaId\", \"type\": \"int\"},\n \"previous_question\": {\"key\": \"previousUserQuery\", \"type\": \"str\"},\n }\n\n def __init__(self, *, previous_qna_id: int, previous_question: Optional[str] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword previous_qna_id: Previous turn top answer result QnA ID. Required.\n :paramtype previous_qna_id: int\n :keyword previous_question: Previous user query.\n :paramtype previous_question: str\n \"\"\"\n super().__init__(**kwargs)\n self.previous_qna_id = previous_qna_id\n self.previous_question = previous_question\n\n\nclass KnowledgeBaseAnswerDialog(_serialization.Model):\n \"\"\"Dialog associated with Answer.\n\n :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If\n true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :vartype is_context_only: bool\n :ivar prompts: List of prompts associated with the answer.\n :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n\n _validation = {\n \"prompts\": {\"max_items\": 20, \"min_items\": 0},\n }\n\n _attribute_map = {\n \"is_context_only\": {\"key\": \"isContextOnly\", \"type\": \"bool\"},\n \"prompts\": {\"key\": \"prompts\", \"type\": \"[KnowledgeBaseAnswerPrompt]\"},\n }\n\n def __init__(\n self,\n *,\n is_context_only: Optional[bool] = None,\n prompts: Optional[List[\"_models.KnowledgeBaseAnswerPrompt\"]] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not.\n If true, do not include this QnA as search result for queries without context; otherwise, if\n false, ignores context and includes this QnA in search result.\n :paramtype is_context_only: bool\n :keyword prompts: List of prompts associated with the answer.\n :paramtype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]\n \"\"\"\n super().__init__(**kwargs)\n self.is_context_only = is_context_only\n self.prompts = prompts\n\n\nclass KnowledgeBaseAnswerPrompt(_serialization.Model):\n \"\"\"Prompt for an answer.\n\n :ivar display_order: Index of the prompt - used in ordering of the prompts.\n :vartype display_order: int\n :ivar qna_id: QnA ID corresponding to the prompt.\n :vartype qna_id: int\n :ivar display_text: Text displayed to represent a follow up question prompt.\n :vartype display_text: str\n \"\"\"\n\n _validation = {\n \"display_text\": {\"max_length\": 200},\n }\n\n _attribute_map = {\n \"display_order\": {\"key\": \"displayOrder\", \"type\": \"int\"},\n \"qna_id\": {\"key\": 
\"qnaId\", \"type\": \"int\"},\n \"display_text\": {\"key\": \"displayText\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n display_order: Optional[int] = None,\n qna_id: Optional[int] = None,\n display_text: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword display_order: Index of the prompt - used in ordering of the prompts.\n :paramtype display_order: int\n :keyword qna_id: QnA ID corresponding to the prompt.\n :paramtype qna_id: int\n :keyword display_text: Text displayed to represent a follow up question prompt.\n :paramtype display_text: str\n \"\"\"\n super().__init__(**kwargs)\n self.display_order = display_order\n self.qna_id = qna_id\n self.display_text = display_text\n\n\nclass MetadataFilter(_serialization.Model):\n \"\"\"Find QnAs that are associated with the given list of metadata.\n\n :ivar metadata:\n :vartype metadata: list[JSON]\n :ivar logical_operation: Operation used to join metadata filters.\n :vartype logical_operation: str\n \"\"\"\n\n _attribute_map = {\n \"metadata\": {\"key\": \"metadata\", \"type\": \"[object]\"},\n \"logical_operation\": {\"key\": \"logicalOperation\", \"type\": \"str\"},\n }\n\n def __init__(\n self, *, metadata: Optional[List[JSON]] = None, logical_operation: Optional[str] = None, **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword metadata:\n :paramtype metadata: list[JSON]\n :keyword logical_operation: Operation used to join metadata filters.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata = metadata\n self.logical_operation = logical_operation\n\n\nclass QueryFilters(_serialization.Model):\n \"\"\"filters over knowledge base.\n\n :ivar metadata_filter: Find QnAs that are associated with the given list of metadata.\n :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :ivar source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :vartype source_filter: list[str]\n :ivar logical_operation: Logical operation used to join metadata filter with source filter.\n :vartype logical_operation: str\n \"\"\"\n\n _attribute_map = {\n \"metadata_filter\": {\"key\": \"metadataFilter\", \"type\": \"MetadataFilter\"},\n \"source_filter\": {\"key\": \"sourceFilter\", \"type\": \"[str]\"},\n \"logical_operation\": {\"key\": \"logicalOperation\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n metadata_filter: Optional[\"_models.MetadataFilter\"] = None,\n source_filter: Optional[List[str]] = None,\n logical_operation: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword metadata_filter: Find QnAs that are associated with the given list of metadata.\n :paramtype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter\n :keyword source_filter: Find QnAs that are associated with any of the given list of sources in\n knowledge base.\n :paramtype source_filter: list[str]\n :keyword logical_operation: Logical operation used to join metadata filter with source filter.\n :paramtype logical_operation: str\n \"\"\"\n super().__init__(**kwargs)\n self.metadata_filter = metadata_filter\n self.source_filter = source_filter\n self.logical_operation = logical_operation\n\n\nclass ShortAnswerOptions(_serialization.Model):\n \"\"\"To configure Answer span prediction feature.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar enable: Enable or disable Answer 
Span prediction. Required. Default value is True.\n :vartype enable: bool\n :ivar confidence_threshold: Minimum threshold score required to include an answer span, value\n ranges from 0 to 1.\n :vartype confidence_threshold: float\n :ivar top: Number of Top answers to be considered for span prediction from 1 to 10.\n :vartype top: int\n \"\"\"\n\n _validation = {\n \"enable\": {\"required\": True, \"constant\": True},\n \"confidence_threshold\": {\"maximum\": 1, \"minimum\": 0},\n \"top\": {\"maximum\": 10, \"minimum\": 1},\n }\n\n _attribute_map = {\n \"enable\": {\"key\": \"enable\", \"type\": \"bool\"},\n \"confidence_threshold\": {\"key\": \"confidenceScoreThreshold\", \"type\": \"float\"},\n \"top\": {\"key\": \"topAnswersWithSpan\", \"type\": \"int\"},\n }\n\n enable = True\n\n def __init__(\n self, *, confidence_threshold: Optional[float] = None, top: Optional[int] = None, **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword confidence_threshold: Minimum threshold score required to include an answer span,\n value ranges from 0 to 1.\n :paramtype confidence_threshold: float\n :keyword top: Number of Top answers to be considered for span prediction from 1 to 10.\n :paramtype top: int\n \"\"\"\n super().__init__(**kwargs)\n self.confidence_threshold = confidence_threshold\n self.top = top\n\n\nclass TextAnswer(_serialization.Model):\n \"\"\"Represents answer result.\n\n :ivar answer: Answer.\n :vartype answer: str\n :ivar confidence: answer confidence score, value ranges from 0 to 1.\n :vartype confidence: float\n :ivar id: record ID.\n :vartype id: str\n :ivar short_answer: Answer span object with respect to user's question.\n :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :ivar offset: The sentence offset from the start of the document.\n :vartype offset: int\n :ivar length: The length of the sentence.\n :vartype length: int\n \"\"\"\n\n _validation = {\n \"confidence\": {\"maximum\": 1, \"minimum\": 0},\n }\n\n _attribute_map = {\n \"answer\": {\"key\": \"answer\", \"type\": \"str\"},\n \"confidence\": {\"key\": \"confidenceScore\", \"type\": \"float\"},\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"short_answer\": {\"key\": \"answerSpan\", \"type\": \"AnswerSpan\"},\n \"offset\": {\"key\": \"offset\", \"type\": \"int\"},\n \"length\": {\"key\": \"length\", \"type\": \"int\"},\n }\n\n def __init__(\n self,\n *,\n answer: Optional[str] = None,\n confidence: Optional[float] = None,\n id: Optional[str] = None, # pylint: disable=redefined-builtin\n short_answer: Optional[\"_models.AnswerSpan\"] = None,\n offset: Optional[int] = None,\n length: Optional[int] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword answer: Answer.\n :paramtype answer: str\n :keyword confidence: answer confidence score, value ranges from 0 to 1.\n :paramtype confidence: float\n :keyword id: record ID.\n :paramtype id: str\n :keyword short_answer: Answer span object with respect to user's question.\n :paramtype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan\n :keyword offset: The sentence offset from the start of the document.\n :paramtype offset: int\n :keyword length: The length of the sentence.\n :paramtype length: int\n \"\"\"\n super().__init__(**kwargs)\n self.answer = answer\n self.confidence = confidence\n self.id = id\n self.short_answer = short_answer\n self.offset = offset\n self.length = length\n\n\nclass TextDocument(_serialization.Model):\n \"\"\"Represent input text record to be queried.\n\n All required parameters must be populated in 
order to send to Azure.\n\n :ivar id: Unique identifier for the text record. Required.\n :vartype id: str\n :ivar text: Text contents of the record. Required.\n :vartype text: str\n \"\"\"\n\n _validation = {\n \"id\": {\"required\": True},\n \"text\": {\"required\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"text\": {\"key\": \"text\", \"type\": \"str\"},\n }\n\n def __init__(self, *, id: str, text: str, **kwargs: Any) -> None: # pylint: disable=redefined-builtin\n \"\"\"\n :keyword id: Unique identifier for the text record. Required.\n :paramtype id: str\n :keyword text: Text contents of the record. Required.\n :paramtype text: str\n \"\"\"\n super().__init__(**kwargs)\n self.id = id\n self.text = text\n", "step-ids": [ 36, 37, 51, 56, 72 ] }
[ 36, 37, 51, 56, 72 ]
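Illustrative usage sketch (not part of the dataset row above): the final "step-5" string in this row reconstructs the model classes of the Azure Question Answering client library, and the snippet below shows how a few of them could be instantiated. The import path is inferred from the `~azure.ai.language.questionanswering.models` references in the docstrings, and the sample ids, questions, and text values are invented for illustration only.

from azure.ai.language.questionanswering.models import (
    AnswersFromTextOptions,
    AnswersOptions,
    KnowledgeBaseAnswerContext,
    ShortAnswerOptions,
    TextDocument,
)

# Text records to search; TextDocument requires both `id` and `text`.
docs = [
    TextDocument(id="doc1", text="Charging the battery takes two to four hours."),
    TextDocument(id="doc2", text="Eco mode reduces screen brightness to save power."),
]

# Question-over-text request: `question` and `text_documents` are required;
# per the docstring, `language` falls back to "en" when omitted.
text_request = AnswersFromTextOptions(
    question="How long does charging take?",
    text_documents=docs,
    language="en",
)

# Knowledge-base request with a follow-up context and short-answer extraction.
kb_request = AnswersOptions(
    question="How do I turn on eco mode?",
    top=3,
    confidence_threshold=0.5,  # _validation declares this must lie in [0, 1]
    answer_context=KnowledgeBaseAnswerContext(previous_qna_id=27),
    short_answer_options=ShortAnswerOptions(confidence_threshold=0.2, top=1),
)

print(text_request.question, len(text_request.text_documents))
print(kb_request.short_answer_options.top)

Note that `ShortAnswerOptions.enable` is a class-level constant fixed to True in the reconstructed code, so callers only supply the confidence threshold and top values as shown.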