index
int64 0
100k
| blob_id
stringlengths 40
40
| code
stringlengths 7
7.27M
| steps
listlengths 1
1.25k
| error
bool 2
classes |
---|---|---|---|---|
99,600 |
2a80fe2f147161aacb8304b0a13ea1325c4789c5
|
# File: probe.py
# Description: Space probe that can be placed into an environment
# Last Modified: May 9, 2018
# Modified By: Sky Hoffert
from lib.sensor import TemperatureSensor
class Probe():
'''Probe that can be placed in an environment'''
def __init__(self, environment=None):
self._environment = environment
self._sensor_temperature = None
def add_sensor(self, type='', accuracy=0):
if type == 'temperature':
self._sensor_temperature = TemperatureSensor( environment=self._environment, accuracy=accuracy )
else:
raise Exception('Not implemented')
def sample(self, type=''):
if type == 'temperature':
return self._sensor_temperature.sample()
else:
raise Exception('Not implemented')
|
[
"# File: probe.py\n# Description: Space probe that can be placed into an environment\n# Last Modified: May 9, 2018\n# Modified By: Sky Hoffert\n\n\nfrom lib.sensor import TemperatureSensor\n\n\nclass Probe():\n '''Probe that can be placed in an environment'''\n\n def __init__(self, environment=None):\n self._environment = environment\n self._sensor_temperature = None\n\n def add_sensor(self, type='', accuracy=0):\n if type == 'temperature':\n self._sensor_temperature = TemperatureSensor( environment=self._environment, accuracy=accuracy )\n else:\n raise Exception('Not implemented')\n\n def sample(self, type=''):\n if type == 'temperature':\n return self._sensor_temperature.sample()\n else:\n raise Exception('Not implemented')\n",
"from lib.sensor import TemperatureSensor\n\n\nclass Probe:\n \"\"\"Probe that can be placed in an environment\"\"\"\n\n def __init__(self, environment=None):\n self._environment = environment\n self._sensor_temperature = None\n\n def add_sensor(self, type='', accuracy=0):\n if type == 'temperature':\n self._sensor_temperature = TemperatureSensor(environment=self.\n _environment, accuracy=accuracy)\n else:\n raise Exception('Not implemented')\n\n def sample(self, type=''):\n if type == 'temperature':\n return self._sensor_temperature.sample()\n else:\n raise Exception('Not implemented')\n",
"<import token>\n\n\nclass Probe:\n \"\"\"Probe that can be placed in an environment\"\"\"\n\n def __init__(self, environment=None):\n self._environment = environment\n self._sensor_temperature = None\n\n def add_sensor(self, type='', accuracy=0):\n if type == 'temperature':\n self._sensor_temperature = TemperatureSensor(environment=self.\n _environment, accuracy=accuracy)\n else:\n raise Exception('Not implemented')\n\n def sample(self, type=''):\n if type == 'temperature':\n return self._sensor_temperature.sample()\n else:\n raise Exception('Not implemented')\n",
"<import token>\n\n\nclass Probe:\n <docstring token>\n\n def __init__(self, environment=None):\n self._environment = environment\n self._sensor_temperature = None\n\n def add_sensor(self, type='', accuracy=0):\n if type == 'temperature':\n self._sensor_temperature = TemperatureSensor(environment=self.\n _environment, accuracy=accuracy)\n else:\n raise Exception('Not implemented')\n\n def sample(self, type=''):\n if type == 'temperature':\n return self._sensor_temperature.sample()\n else:\n raise Exception('Not implemented')\n",
"<import token>\n\n\nclass Probe:\n <docstring token>\n\n def __init__(self, environment=None):\n self._environment = environment\n self._sensor_temperature = None\n\n def add_sensor(self, type='', accuracy=0):\n if type == 'temperature':\n self._sensor_temperature = TemperatureSensor(environment=self.\n _environment, accuracy=accuracy)\n else:\n raise Exception('Not implemented')\n <function token>\n",
"<import token>\n\n\nclass Probe:\n <docstring token>\n <function token>\n\n def add_sensor(self, type='', accuracy=0):\n if type == 'temperature':\n self._sensor_temperature = TemperatureSensor(environment=self.\n _environment, accuracy=accuracy)\n else:\n raise Exception('Not implemented')\n <function token>\n",
"<import token>\n\n\nclass Probe:\n <docstring token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,601 |
e8ee340954c8468d4d43f57c418f178a60413b10
|
from selenium import webdriver
import time
from time import sleep
driver = webdriver.Chrome(executable_path="/Users/gaurnitai/Desktop/PySelenium/drivers/chromedriver")
driver.get("https://www.amazon.in")
driver.maximize_window()
page_title = driver.title
print(page_title)
search_box = driver.find_element_by_id("twotabsearchtextbox")
search_box.send_keys("Macbook Pro 15 inch")
search_icon = driver.find_element_by_xpath("//input[@type='submit']")
search_icon.click()
sleep(3)
expected_product_list = [
'Apple MacBook Pro (15-inch, Latest Model, 16GB RAM, 256GB Storage, 2.6GHz Intel Core i7) - Space Grey',
'New Apple MacBook Pro (13-inch, 8GB RAM, 128GB Storage, 1.4GHz Intel Core i5) - Space Grey']
products_list = driver.find_elements_by_xpath(
"//div[@class='s-include-content-margin s-border-bottom']//a[@class='a-link-normal a-text-normal']")
for expected_product in expected_product_list:
for product in products_list:
print(product.text)
if (product.text == expected_product):
product.click()
sleep(2)
parent_window = driver.current_window_handle # parent window - String
windows_list = driver.window_handles # parent & child windows - Set of Strings
for window in windows_list:
if (window != parent_window):
driver.switch_to.window(window)
# driver.switch_to_window(window)
product_detail_title = driver.find_element_by_id("productTitle")
product_title = product_detail_title.text
print(product_title)
product_detail_price = driver.find_element_by_id("priceblock_ourprice")
product_price = product_detail_price.text
print(product_price)
product_detail_rating = driver.find_element_by_id("acrCustomerReviewText")
product_rating = product_detail_rating.text
print(product_rating)
sleep(3)
driver.close()
driver.switch_to.window(parent_window)
sleep(3)
# for product in products_list:
# print(product.text)
# if (product.text == 'New Apple MacBook Pro (13-inch, 8GB RAM, 128GB Storage, 1.4GHz Intel Core i5) - Space Grey'):
# product.click()
# sleep(2)
# break
# parent_window = driver.current_window_handle
# windows_list = driver.window_handles
#
# for window in windows_list:
# if (window != parent_window):
# driver.switch_to.window(window)
# # driver.switch_to_window(window)
# product_detail_title = driver.find_element_by_id("productTitle")
# product_title = product_detail_title.text
# print(product_title)
#
# product_detail_price = driver.find_element_by_id("priceblock_ourprice")
# product_price = product_detail_price.text
# print(product_price)
#
# product_detail_rating = driver.find_element_by_id("acrCustomerReviewText")
# product_rating = product_detail_rating.text
# print(product_rating)
#
# driver.switch_to.window(parent_window)
sleep(3)
driver.close()
driver.quit()
|
[
"from selenium import webdriver\nimport time\n\nfrom time import sleep\n\ndriver = webdriver.Chrome(executable_path=\"/Users/gaurnitai/Desktop/PySelenium/drivers/chromedriver\")\n\ndriver.get(\"https://www.amazon.in\")\n\ndriver.maximize_window()\n\npage_title = driver.title\n\nprint(page_title)\n\nsearch_box = driver.find_element_by_id(\"twotabsearchtextbox\")\nsearch_box.send_keys(\"Macbook Pro 15 inch\")\n\nsearch_icon = driver.find_element_by_xpath(\"//input[@type='submit']\")\nsearch_icon.click()\nsleep(3)\n\nexpected_product_list = [\n 'Apple MacBook Pro (15-inch, Latest Model, 16GB RAM, 256GB Storage, 2.6GHz Intel Core i7) - Space Grey',\n 'New Apple MacBook Pro (13-inch, 8GB RAM, 128GB Storage, 1.4GHz Intel Core i5) - Space Grey']\n\nproducts_list = driver.find_elements_by_xpath(\n \"//div[@class='s-include-content-margin s-border-bottom']//a[@class='a-link-normal a-text-normal']\")\n\nfor expected_product in expected_product_list:\n for product in products_list:\n print(product.text)\n if (product.text == expected_product):\n product.click()\n sleep(2)\n parent_window = driver.current_window_handle # parent window - String\n windows_list = driver.window_handles # parent & child windows - Set of Strings\n for window in windows_list:\n if (window != parent_window):\n driver.switch_to.window(window)\n # driver.switch_to_window(window)\n product_detail_title = driver.find_element_by_id(\"productTitle\")\n product_title = product_detail_title.text\n print(product_title)\n\n product_detail_price = driver.find_element_by_id(\"priceblock_ourprice\")\n product_price = product_detail_price.text\n print(product_price)\n\n product_detail_rating = driver.find_element_by_id(\"acrCustomerReviewText\")\n product_rating = product_detail_rating.text\n print(product_rating)\n sleep(3)\n driver.close()\n driver.switch_to.window(parent_window)\n sleep(3)\n\n# for product in products_list:\n# print(product.text)\n# if (product.text == 'New Apple MacBook Pro (13-inch, 8GB RAM, 128GB Storage, 1.4GHz Intel Core i5) - Space Grey'):\n# product.click()\n# sleep(2)\n# break\n\n# parent_window = driver.current_window_handle\n# windows_list = driver.window_handles\n#\n# for window in windows_list:\n# if (window != parent_window):\n# driver.switch_to.window(window)\n# # driver.switch_to_window(window)\n# product_detail_title = driver.find_element_by_id(\"productTitle\")\n# product_title = product_detail_title.text\n# print(product_title)\n#\n# product_detail_price = driver.find_element_by_id(\"priceblock_ourprice\")\n# product_price = product_detail_price.text\n# print(product_price)\n#\n# product_detail_rating = driver.find_element_by_id(\"acrCustomerReviewText\")\n# product_rating = product_detail_rating.text\n# print(product_rating)\n#\n# driver.switch_to.window(parent_window)\n\nsleep(3)\n\ndriver.close()\ndriver.quit()\n",
"from selenium import webdriver\nimport time\nfrom time import sleep\ndriver = webdriver.Chrome(executable_path=\n '/Users/gaurnitai/Desktop/PySelenium/drivers/chromedriver')\ndriver.get('https://www.amazon.in')\ndriver.maximize_window()\npage_title = driver.title\nprint(page_title)\nsearch_box = driver.find_element_by_id('twotabsearchtextbox')\nsearch_box.send_keys('Macbook Pro 15 inch')\nsearch_icon = driver.find_element_by_xpath(\"//input[@type='submit']\")\nsearch_icon.click()\nsleep(3)\nexpected_product_list = [\n 'Apple MacBook Pro (15-inch, Latest Model, 16GB RAM, 256GB Storage, 2.6GHz Intel Core i7) - Space Grey'\n ,\n 'New Apple MacBook Pro (13-inch, 8GB RAM, 128GB Storage, 1.4GHz Intel Core i5) - Space Grey'\n ]\nproducts_list = driver.find_elements_by_xpath(\n \"//div[@class='s-include-content-margin s-border-bottom']//a[@class='a-link-normal a-text-normal']\"\n )\nfor expected_product in expected_product_list:\n for product in products_list:\n print(product.text)\n if product.text == expected_product:\n product.click()\n sleep(2)\n parent_window = driver.current_window_handle\n windows_list = driver.window_handles\n for window in windows_list:\n if window != parent_window:\n driver.switch_to.window(window)\n product_detail_title = driver.find_element_by_id(\n 'productTitle')\n product_title = product_detail_title.text\n print(product_title)\n product_detail_price = driver.find_element_by_id(\n 'priceblock_ourprice')\n product_price = product_detail_price.text\n print(product_price)\n product_detail_rating = driver.find_element_by_id(\n 'acrCustomerReviewText')\n product_rating = product_detail_rating.text\n print(product_rating)\n sleep(3)\n driver.close()\n driver.switch_to.window(parent_window)\n sleep(3)\nsleep(3)\ndriver.close()\ndriver.quit()\n",
"<import token>\ndriver = webdriver.Chrome(executable_path=\n '/Users/gaurnitai/Desktop/PySelenium/drivers/chromedriver')\ndriver.get('https://www.amazon.in')\ndriver.maximize_window()\npage_title = driver.title\nprint(page_title)\nsearch_box = driver.find_element_by_id('twotabsearchtextbox')\nsearch_box.send_keys('Macbook Pro 15 inch')\nsearch_icon = driver.find_element_by_xpath(\"//input[@type='submit']\")\nsearch_icon.click()\nsleep(3)\nexpected_product_list = [\n 'Apple MacBook Pro (15-inch, Latest Model, 16GB RAM, 256GB Storage, 2.6GHz Intel Core i7) - Space Grey'\n ,\n 'New Apple MacBook Pro (13-inch, 8GB RAM, 128GB Storage, 1.4GHz Intel Core i5) - Space Grey'\n ]\nproducts_list = driver.find_elements_by_xpath(\n \"//div[@class='s-include-content-margin s-border-bottom']//a[@class='a-link-normal a-text-normal']\"\n )\nfor expected_product in expected_product_list:\n for product in products_list:\n print(product.text)\n if product.text == expected_product:\n product.click()\n sleep(2)\n parent_window = driver.current_window_handle\n windows_list = driver.window_handles\n for window in windows_list:\n if window != parent_window:\n driver.switch_to.window(window)\n product_detail_title = driver.find_element_by_id(\n 'productTitle')\n product_title = product_detail_title.text\n print(product_title)\n product_detail_price = driver.find_element_by_id(\n 'priceblock_ourprice')\n product_price = product_detail_price.text\n print(product_price)\n product_detail_rating = driver.find_element_by_id(\n 'acrCustomerReviewText')\n product_rating = product_detail_rating.text\n print(product_rating)\n sleep(3)\n driver.close()\n driver.switch_to.window(parent_window)\n sleep(3)\nsleep(3)\ndriver.close()\ndriver.quit()\n",
"<import token>\n<assignment token>\ndriver.get('https://www.amazon.in')\ndriver.maximize_window()\n<assignment token>\nprint(page_title)\n<assignment token>\nsearch_box.send_keys('Macbook Pro 15 inch')\n<assignment token>\nsearch_icon.click()\nsleep(3)\n<assignment token>\nfor expected_product in expected_product_list:\n for product in products_list:\n print(product.text)\n if product.text == expected_product:\n product.click()\n sleep(2)\n parent_window = driver.current_window_handle\n windows_list = driver.window_handles\n for window in windows_list:\n if window != parent_window:\n driver.switch_to.window(window)\n product_detail_title = driver.find_element_by_id(\n 'productTitle')\n product_title = product_detail_title.text\n print(product_title)\n product_detail_price = driver.find_element_by_id(\n 'priceblock_ourprice')\n product_price = product_detail_price.text\n print(product_price)\n product_detail_rating = driver.find_element_by_id(\n 'acrCustomerReviewText')\n product_rating = product_detail_rating.text\n print(product_rating)\n sleep(3)\n driver.close()\n driver.switch_to.window(parent_window)\n sleep(3)\nsleep(3)\ndriver.close()\ndriver.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,602 |
63eca29a361a5ead00580e1733ce15e6862ad322
|
# 12345678901234567890123456
# 65432109876543210987654321
letters = "abcdefghijklmnopqrstuvwxyz"
backwards = letters[25::-1]
tebahpla = letters[::-1] # as the step is a negative this works as python reverses the thestart and end characters
print(backwards)
print(tebahpla)
print(letters[-10:-13:-1])
print(letters[16:13:-1])
print(letters[-22::-1])
print(letters[4::-1])
print(letters[:-9:-1])
|
[
"# 12345678901234567890123456\n# 65432109876543210987654321\nletters = \"abcdefghijklmnopqrstuvwxyz\"\n\nbackwards = letters[25::-1]\ntebahpla = letters[::-1] # as the step is a negative this works as python reverses the thestart and end characters\nprint(backwards)\nprint(tebahpla)\nprint(letters[-10:-13:-1])\nprint(letters[16:13:-1])\nprint(letters[-22::-1])\nprint(letters[4::-1])\nprint(letters[:-9:-1]) ",
"letters = 'abcdefghijklmnopqrstuvwxyz'\nbackwards = letters[25::-1]\ntebahpla = letters[::-1]\nprint(backwards)\nprint(tebahpla)\nprint(letters[-10:-13:-1])\nprint(letters[16:13:-1])\nprint(letters[-22::-1])\nprint(letters[4::-1])\nprint(letters[:-9:-1])\n",
"<assignment token>\nprint(backwards)\nprint(tebahpla)\nprint(letters[-10:-13:-1])\nprint(letters[16:13:-1])\nprint(letters[-22::-1])\nprint(letters[4::-1])\nprint(letters[:-9:-1])\n",
"<assignment token>\n<code token>\n"
] | false |
99,603 |
1550382579701a95e6d7c636c1c3db6d54d7e662
|
i = 14
while i >= -3:
if i<0:
print("{} is negative" .format(i))
elif i % 2 ==0:
print("{} is even" .format(i))
else:
print("{} is odd" .format(i))
i = i - 3
|
[
"i = 14\nwhile i >= -3:\n\tif i<0:\n\t\tprint(\"{} is negative\" .format(i))\n\telif i % 2 ==0:\n\t\tprint(\"{} is even\" .format(i))\n\telse:\n\t\tprint(\"{} is odd\" .format(i))\n\n\ti = i - 3",
"i = 14\nwhile i >= -3:\n if i < 0:\n print('{} is negative'.format(i))\n elif i % 2 == 0:\n print('{} is even'.format(i))\n else:\n print('{} is odd'.format(i))\n i = i - 3\n",
"<assignment token>\nwhile i >= -3:\n if i < 0:\n print('{} is negative'.format(i))\n elif i % 2 == 0:\n print('{} is even'.format(i))\n else:\n print('{} is odd'.format(i))\n i = i - 3\n",
"<assignment token>\n<code token>\n"
] | false |
99,604 |
82af34164a6c63e4ab525bfb53f93e00103e36d1
|
import os, hashlib, binascii as ba
import base64, re
import time, math
from colors import *
# from functools import lru_cache
from numba import jit
from cachetools.func import *
from cachy import *
def iif(a,b,c):return b if a else c
import json
def obj2json(obj):
return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)
@stale_cache(ttr=1, ttl=30)
def readfile(fn, mode='rb', *a, **kw):
if 'b' not in mode:
with open(fn, mode, encoding='utf8', *a, **kw) as f:
return f.read()
else:
with open(fn, mode, *a, **kw) as f:
return f.read()
def writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):
if 'b' not in mode:
with open(fn,mode, encoding=encoding, *a,**kw) as f:
f.write(data)
else:
with open(fn,mode,*a,**kw) as f:
f.write(data)
def removefile(fn):
try:
os.remove(fn)
except Exception as e:
print_err(e)
print_err('failed to remove', fn)
else:
return
import threading
def dispatch(f):
return tpe.submit(f)
# t = AppContextThreadMod(target=f, daemon=True)
# # t = threading.Thread(target=f, daemon=True)
# t.start()
def dispatch_with_retries(f):
n = 0
def wrapper():
nonlocal n
while 1:
try:
f()
except Exception as e:
print_err(e)
n+=1
time.sleep(0.5)
print_up(f'{f.__name__}() retry #{n}')
else:
print_down(f'{f.__name__}() success on attempt #{n}')
break
return tpe.submit(wrapper)
def init_directory(d):
try:
os.mkdir(d)
except FileExistsError as e:
print_err('directory {} already exists.'.format(d), e)
else:
print_info('directory {} created.'.format(d))
def key(d, k):
if k in d:
return d[k]
else:
return None
def intify(s, name=''):
try:
return int(s)
except:
if s:
# print_err('intifys',s,name)
pass
return 0
def floatify(s):
try:
return float(s)
except:
if s:
pass
return 0.
def get_environ(k):
k = k.upper()
if k in os.environ:
return os.environ[k]
else:
return None
def clip(a,b):
def _clip(c):
return min(b,max(a, c))
return _clip
clip01 = clip(0,1)
import zlib
def calculate_checksum(bin): return zlib.adler32(bin).to_bytes(4,'big')
def calculate_checksum_base64(bin):
csum = calculate_checksum(bin)
chksum_encoded = base64.b64encode(csum).decode('ascii')
return chksum_encoded
def calculate_checksum_base64_replaced(bin):
return calculate_checksum_base64(bin).replace('+','-').replace('/','_')
def calculate_etag(bin):
return calculate_checksum_base64_replaced(bin)
# pw hashing
def bytes2hexstr(b):
return ba.b2a_hex(b).decode('ascii')
def hexstr2bytes(h):
return ba.a2b_hex(h.encode('ascii'))
# https://nitratine.net/blog/post/how-to-hash-passwords-in-python/
def get_salt():
return os.urandom(32)
def get_random_hex_string(b=8):
return base64.b16encode(os.urandom(b)).decode('ascii')
def hash_pw(salt, string):
return hashlib.pbkdf2_hmac(
'sha256',
string.encode('ascii'),
salt,
100000,
)
# input string, output hash and salt
def hash_w_salt(string):
salt = get_salt()
hash = hash_pw(salt, string)
return bytes2hexstr(hash), bytes2hexstr(salt)
# input hash,salt,string, output comparison result
def check_hash_salt_pw(hashstr, saltstr, string):
chash = hash_pw(hexstr2bytes(saltstr), string)
return chash == hexstr2bytes(hashstr)
def timethis(stmt):
import re, timeit
print('timing', stmt)
broken = re.findall(f'\$([a-zA-Z][0-9a-zA-Z_\-]*)', stmt)
stmt = stmt.replace('$','')
setup = f"from __main__ import {','.join(broken)}"
exec(setup) # preheat
exec(stmt)
timeit.Timer(stmt,
setup=setup
).autorange(
lambda a,b:print(f'{a} in {b:.4f}, avg: {b/a*1000_000:.4f}us'))
# if __name__ == '__main__':
# k = time.time()
# def hello():
# if time.time() - k < 2:
# raise Exception('nah')
#
# dispatch_with_retries(hello)
# time.sleep(4)
if __name__ == '__main__':
toenc = b"r12uf-398gy309ghh123r1"*100000
timethis('calculate_checksum_base64_replaced(toenc)')
# everything time related
import datetime
dtdt = datetime.datetime
dtt = datetime.time
dtd = datetime.date
dtn = dtdt.now
dttz = datetime.timezone
dttd = datetime.timedelta
# default time parsing
def dtdt_from_stamp(stamp):
return dtdt.fromisoformat(stamp)
dfs = dtdt_from_stamp
def dfshk(stamp):
return dfs(stamp).replace(tzinfo=working_timezone)
# proper time formatting
# input: string iso timestamp
# output: string formatted time
def format_time(dtdt,s):
return dtdt.strftime(s)
# default time formatting
def format_time_iso(dtdt):
return dtdt.isoformat(timespec='seconds')[:19]
fti = format_time_iso
format_time_datetime = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M')
format_time_datetime_second = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M:%S')
format_time_dateonly = lambda s: format_time(dfs(s), '%Y-%m-%d')
format_time_timeonly = lambda s: format_time(dfs(s), '%H:%M')
def days_since(ts):
then = dfshk(ts)
now = dtn(working_timezone)
dt = now - then
return dt.days
def days_between(ts0, ts1):
return abs(days_since(ts0) - days_since(ts1))
def seconds_since(ts):
then = dfshk(ts)
now = dtn(working_timezone)
dt = now - then
return dt.total_seconds()
def cap(x, mi, ma):
return min(max(x, mi),ma)
working_timezone = dttz(dttd(hours=+8)) # Hong Kong
gmt_timezone = dttz(dttd(hours=0)) # GMT
def time_iso_now(dt=0): # dt in seconds
return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))
|
[
"import os, hashlib, binascii as ba\nimport base64, re\nimport time, math\nfrom colors import *\n# from functools import lru_cache\nfrom numba import jit\n\nfrom cachetools.func import *\nfrom cachy import *\n\ndef iif(a,b,c):return b if a else c\n\nimport json\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, encoding='utf8', *a, **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn,mode, encoding=encoding, *a,**kw) as f:\n f.write(data)\n else:\n with open(fn,mode,*a,**kw) as f:\n f.write(data)\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\nimport threading\n\ndef dispatch(f):\n return tpe.submit(f)\n # t = AppContextThreadMod(target=f, daemon=True)\n # # t = threading.Thread(target=f, daemon=True)\n # t.start()\n\ndef dispatch_with_retries(f):\n n = 0\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n+=1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n # print_err('intifys',s,name)\n pass\n return 0\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\ndef clip(a,b):\n def _clip(c):\n return min(b,max(a, c))\n return _clip\n\nclip01 = clip(0,1)\n\nimport zlib\n\ndef calculate_checksum(bin): return zlib.adler32(bin).to_bytes(4,'big')\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+','-').replace('/','_')\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n# pw hashing\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n# https://nitratine.net/blog/post/how-to-hash-passwords-in-python/\ndef get_salt():\n return os.urandom(32)\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac(\n 'sha256',\n string.encode('ascii'),\n salt,\n 100000,\n )\n\n# input string, output hash and salt\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n# input hash,salt,string, output comparison result\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\$([a-zA-Z][0-9a-zA-Z_\\-]*)', stmt)\n stmt = stmt.replace('$','')\n setup = f\"from __main__ import {','.join(broken)}\"\n\n exec(setup) # preheat\n exec(stmt)\n\n timeit.Timer(stmt,\n setup=setup\n ).autorange(\n lambda a,b:print(f'{a} in {b:.4f}, avg: {b/a*1000_000:.4f}us'))\n\n# if __name__ == '__main__':\n# k = time.time()\n# def hello():\n# if time.time() - k < 2:\n# raise Exception('nah')\n#\n# dispatch_with_retries(hello)\n# time.sleep(4)\n\nif __name__ == '__main__':\n toenc = b\"r12uf-398gy309ghh123r1\"*100000\n timethis('calculate_checksum_base64_replaced(toenc)')\n\n\n\n# everything time related\n\nimport datetime\n\ndtdt = datetime.datetime\ndtt = datetime.time\ndtd = datetime.date\ndtn = dtdt.now\ndttz = datetime.timezone\ndttd = datetime.timedelta\n\n# default time parsing\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\ndfs = dtdt_from_stamp\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n# proper time formatting\n# input: string iso timestamp\n# output: string formatted time\n\ndef format_time(dtdt,s):\n return dtdt.strftime(s)\n\n# default time formatting\ndef format_time_iso(dtdt):\n return dtdt.isoformat(timespec='seconds')[:19]\nfti = format_time_iso\n\nformat_time_datetime = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M')\nformat_time_datetime_second = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M:%S')\nformat_time_dateonly = lambda s: format_time(dfs(s), '%Y-%m-%d')\nformat_time_timeonly = lambda s: format_time(dfs(s), '%H:%M')\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\ndef seconds_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.total_seconds()\n\ndef cap(x, mi, ma):\n return min(max(x, mi),ma)\n\nworking_timezone = dttz(dttd(hours=+8)) # Hong Kong\ngmt_timezone = dttz(dttd(hours=0)) # GMT\n\ndef time_iso_now(dt=0): # dt in seconds\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"import os, hashlib, binascii as ba\nimport base64, re\nimport time, math\nfrom colors import *\nfrom numba import jit\nfrom cachetools.func import *\nfrom cachy import *\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\nimport json\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\nimport threading\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\nclip01 = clip(0, 1)\nimport zlib\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\nif __name__ == '__main__':\n toenc = b'r12uf-398gy309ghh123r1' * 100000\n timethis('calculate_checksum_base64_replaced(toenc)')\nimport datetime\ndtdt = datetime.datetime\ndtt = datetime.time\ndtd = datetime.date\ndtn = dtdt.now\ndttz = datetime.timezone\ndttd = datetime.timedelta\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\ndfs = dtdt_from_stamp\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\ndef format_time(dtdt, s):\n return dtdt.strftime(s)\n\n\ndef format_time_iso(dtdt):\n return dtdt.isoformat(timespec='seconds')[:19]\n\n\nfti = format_time_iso\nformat_time_datetime = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M')\nformat_time_datetime_second = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M:%S'\n )\nformat_time_dateonly = lambda s: format_time(dfs(s), '%Y-%m-%d')\nformat_time_timeonly = lambda s: format_time(dfs(s), '%H:%M')\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\ndef seconds_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.total_seconds()\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\nworking_timezone = dttz(dttd(hours=+8))\ngmt_timezone = dttz(dttd(hours=0))\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\nclip01 = clip(0, 1)\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\nif __name__ == '__main__':\n toenc = b'r12uf-398gy309ghh123r1' * 100000\n timethis('calculate_checksum_base64_replaced(toenc)')\n<import token>\ndtdt = datetime.datetime\ndtt = datetime.time\ndtd = datetime.date\ndtn = dtdt.now\ndttz = datetime.timezone\ndttd = datetime.timedelta\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\ndfs = dtdt_from_stamp\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\ndef format_time(dtdt, s):\n return dtdt.strftime(s)\n\n\ndef format_time_iso(dtdt):\n return dtdt.isoformat(timespec='seconds')[:19]\n\n\nfti = format_time_iso\nformat_time_datetime = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M')\nformat_time_datetime_second = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M:%S'\n )\nformat_time_dateonly = lambda s: format_time(dfs(s), '%Y-%m-%d')\nformat_time_timeonly = lambda s: format_time(dfs(s), '%H:%M')\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\ndef seconds_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.total_seconds()\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\nworking_timezone = dttz(dttd(hours=+8))\ngmt_timezone = dttz(dttd(hours=0))\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\nif __name__ == '__main__':\n toenc = b'r12uf-398gy309ghh123r1' * 100000\n timethis('calculate_checksum_base64_replaced(toenc)')\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\ndef format_time(dtdt, s):\n return dtdt.strftime(s)\n\n\ndef format_time_iso(dtdt):\n return dtdt.isoformat(timespec='seconds')[:19]\n\n\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\ndef seconds_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.total_seconds()\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\ndef format_time(dtdt, s):\n return dtdt.strftime(s)\n\n\ndef format_time_iso(dtdt):\n return dtdt.isoformat(timespec='seconds')[:19]\n\n\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\ndef seconds_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.total_seconds()\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n\n\ndef format_time_iso(dtdt):\n return dtdt.isoformat(timespec='seconds')[:19]\n\n\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\ndef seconds_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.total_seconds()\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n\n\ndef format_time_iso(dtdt):\n return dtdt.isoformat(timespec='seconds')[:19]\n\n\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\ndef hexstr2bytes(h):\n return ba.a2b_hex(h.encode('ascii'))\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\ndef writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding=encoding, **kw) as f:\n f.write(data)\n else:\n with open(fn, mode, *a, **kw) as f:\n f.write(data)\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\ndef dispatch_with_retries(f):\n n = 0\n\n def wrapper():\n nonlocal n\n while 1:\n try:\n f()\n except Exception as e:\n print_err(e)\n n += 1\n time.sleep(0.5)\n print_up(f'{f.__name__}() retry #{n}')\n else:\n print_down(f'{f.__name__}() success on attempt #{n}')\n break\n return tpe.submit(wrapper)\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n\n\ndef days_since(ts):\n then = dfshk(ts)\n now = dtn(working_timezone)\n dt = now - then\n return dt.days\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\ndef clip(a, b):\n\n def _clip(c):\n return min(b, max(a, c))\n return _clip\n\n\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n\n\ndef days_between(ts0, ts1):\n return abs(days_since(ts0) - days_since(ts1))\n\n\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n\n\ndef time_iso_now(dt=0):\n return format_time_iso(dtn(working_timezone) + dttd(seconds=dt))\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n\n\ndef dtdt_from_stamp(stamp):\n return dtdt.fromisoformat(stamp)\n\n\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\ndef get_random_hex_string(b=8):\n return base64.b16encode(os.urandom(b)).decode('ascii')\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n@stale_cache(ttr=1, ttl=30)\ndef readfile(fn, mode='rb', *a, **kw):\n if 'b' not in mode:\n with open(fn, mode, *a, encoding='utf8', **kw) as f:\n return f.read()\n else:\n with open(fn, mode, *a, **kw) as f:\n return f.read()\n\n\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\ndef calculate_checksum_base64_replaced(bin):\n return calculate_checksum_base64(bin).replace('+', '-').replace('/', '_')\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\ndef bytes2hexstr(b):\n return ba.b2a_hex(b).decode('ascii')\n\n\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n\n\ndef init_directory(d):\n try:\n os.mkdir(d)\n except FileExistsError as e:\n print_err('directory {} already exists.'.format(d), e)\n else:\n print_info('directory {} created.'.format(d))\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n\n\ndef removefile(fn):\n try:\n os.remove(fn)\n except Exception as e:\n print_err(e)\n print_err('failed to remove', fn)\n else:\n return\n\n\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\ndef floatify(s):\n try:\n return float(s)\n except:\n if s:\n pass\n return 0.0\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\ndef timethis(stmt):\n import re, timeit\n print('timing', stmt)\n broken = re.findall(f'\\\\$([a-zA-Z][0-9a-zA-Z_\\\\-]*)', stmt)\n stmt = stmt.replace('$', '')\n setup = f\"from __main__ import {','.join(broken)}\"\n exec(setup)\n exec(stmt)\n timeit.Timer(stmt, setup=setup).autorange(lambda a, b: print(\n f'{a} in {b:.4f}, avg: {b / a * 1000000:.4f}us'))\n\n\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n\n\ndef get_salt():\n return os.urandom(32)\n\n\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n\n\ndef key(d, k):\n if k in d:\n return d[k]\n else:\n return None\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\ndef hash_w_salt(string):\n salt = get_salt()\n hash = hash_pw(salt, string)\n return bytes2hexstr(hash), bytes2hexstr(salt)\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n\n\ndef calculate_etag(bin):\n return calculate_checksum_base64_replaced(bin)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n\n\ndef check_hash_salt_pw(hashstr, saltstr, string):\n chash = hash_pw(hexstr2bytes(saltstr), string)\n return chash == hexstr2bytes(hashstr)\n\n\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef cap(x, mi, ma):\n return min(max(x, mi), ma)\n\n\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n\n\ndef calculate_checksum(bin):\n return zlib.adler32(bin).to_bytes(4, 'big')\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef dfshk(stamp):\n return dfs(stamp).replace(tzinfo=working_timezone)\n\n\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n\n\ndef obj2json(obj):\n return json.dumps(obj, ensure_ascii=False, sort_keys=True, indent=2)\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef intify(s, name=''):\n try:\n return int(s)\n except:\n if s:\n pass\n return 0\n\n\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef calculate_checksum_base64(bin):\n csum = calculate_checksum(bin)\n chksum_encoded = base64.b64encode(csum).decode('ascii')\n return chksum_encoded\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n\n\ndef iif(a, b, c):\n return b if a else c\n\n\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef hash_pw(salt, string):\n return hashlib.pbkdf2_hmac('sha256', string.encode('ascii'), salt, 100000)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_environ(k):\n k = k.upper()\n if k in os.environ:\n return os.environ[k]\n else:\n return None\n\n\n<function token>\n<assignment token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef dispatch(f):\n return tpe.submit(f)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n",
"<import token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n"
] | false |
99,605 |
d3542fbc1b1ea346260005d8e76a867f6363ae7d
|
# Generated by Django 2.2.3 on 2019-07-10 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='InicioSesion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('users', models.CharField(max_length=10)),
('password', models.CharField(max_length=10)),
('ruc', models.CharField(max_length=13)),
('register_date', models.DateTimeField(verbose_name='Registro cliente')),
],
),
]
|
[
"# Generated by Django 2.2.3 on 2019-07-10 15:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='InicioSesion',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('users', models.CharField(max_length=10)),\n ('password', models.CharField(max_length=10)),\n ('ruc', models.CharField(max_length=13)),\n ('register_date', models.DateTimeField(verbose_name='Registro cliente')),\n ],\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='InicioSesion', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('users', models.CharField(max_length=\n 10)), ('password', models.CharField(max_length=10)), ('ruc', models\n .CharField(max_length=13)), ('register_date', models.DateTimeField(\n verbose_name='Registro cliente'))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='InicioSesion', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('users', models.CharField(max_length=\n 10)), ('password', models.CharField(max_length=10)), ('ruc', models\n .CharField(max_length=13)), ('register_date', models.DateTimeField(\n verbose_name='Registro cliente'))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
99,606 |
72b6b9f69cf229cb0884bafe51e692369a78ca6b
|
import re
def remove_special_characters(txt):
final = re.sub(r'[^\s\w_-]+', '', txt)
return final
|
[
"\nimport re \ndef remove_special_characters(txt):\n final = re.sub(r'[^\\s\\w_-]+', '', txt)\n return final\n\n",
"import re\n\n\ndef remove_special_characters(txt):\n final = re.sub('[^\\\\s\\\\w_-]+', '', txt)\n return final\n",
"<import token>\n\n\ndef remove_special_characters(txt):\n final = re.sub('[^\\\\s\\\\w_-]+', '', txt)\n return final\n",
"<import token>\n<function token>\n"
] | false |
99,607 |
113d11ca5b83fb74ac980f61cb772ae1f132d429
|
"""
#1
celsius = eval(raw_input('>>'))
fahrenheit = (9.0 / 5.0) * celsius + 32
print(fahrenheit)
#2
radius,length = eval(raw_input('>>'))
area = radius * radius * 3.1415926
volume = area * length
print(area,volume)
#3
feet = eval(raw_input('>>'))
meters = feet * 0.305
print(meters)
#4
M,initialTemperature,finalTemperature = eval(raw_input('>>'))
Q = M * (finalTemperature - initialTemperature) * 4148
print(Q)
#5
balance,interest1rate = eval(raw_input('>>'))
interest = balance * (interest1rate / 1200)
print(interest)
#6
v0,v1,t = eval(raw_input('>>'))
a = (v1 - v0) / t
print(a)
#7
a=float(raw_input('>>'))
a1=a*(1+0.00417)
a2=(a+a1)*(1+0.00417)
a3=(a+a2)*(1+0.00417)
a4=(a+a3)*(1+0.00417)
a5=(a+a4)*(1+0.00417)
a6=(a+a5)*(1+0.00417)
print(a6)"""
#8
number=eval(raw_input('>>'))
a=number%10
b=number//100
c=(number%100)//10
digits=a+b+c
print(digits)
|
[
"\n\"\"\"\n#1\ncelsius = eval(raw_input('>>'))\nfahrenheit = (9.0 / 5.0) * celsius + 32\nprint(fahrenheit)\n\n#2\nradius,length = eval(raw_input('>>'))\narea = radius * radius * 3.1415926\nvolume = area * length\nprint(area,volume)\n\n#3\nfeet = eval(raw_input('>>'))\nmeters = feet * 0.305\nprint(meters)\n\n#4\nM,initialTemperature,finalTemperature = eval(raw_input('>>'))\nQ = M * (finalTemperature - initialTemperature) * 4148\nprint(Q)\n\n#5\nbalance,interest1rate = eval(raw_input('>>'))\ninterest = balance * (interest1rate / 1200)\nprint(interest)\n\n#6\nv0,v1,t = eval(raw_input('>>'))\na = (v1 - v0) / t\nprint(a)\n\n\n#7\na=float(raw_input('>>'))\na1=a*(1+0.00417)\na2=(a+a1)*(1+0.00417)\na3=(a+a2)*(1+0.00417)\na4=(a+a3)*(1+0.00417)\na5=(a+a4)*(1+0.00417)\na6=(a+a5)*(1+0.00417)\nprint(a6)\"\"\"\n\n#8\nnumber=eval(raw_input('>>'))\na=number%10\nb=number//100\nc=(number%100)//10\ndigits=a+b+c\nprint(digits)\n",
"<docstring token>\nnumber = eval(raw_input('>>'))\na = number % 10\nb = number // 100\nc = number % 100 // 10\ndigits = a + b + c\nprint(digits)\n",
"<docstring token>\n<assignment token>\nprint(digits)\n",
"<docstring token>\n<assignment token>\n<code token>\n"
] | false |
99,608 |
54a7bfdb11faf2c38dcf717f39abd8bc7495862f
|
import discord, random
from discord.ext import commands
import asyncio
import os
client=commands.Bot(command_prefix='"')
client.remove_command('help')
@client.event
async def on_ready():
await client.change_presence(activity=discord.Game(name='"help'))
@client.command()
@commands.has_permissions(ban_members=True)
async def ban(ctx, member: discord.Member=None):
if not member:
await ctx.send('Please mention a member')
return
await member.ban()
await ctx.send(f'{member.display_name} was banned from the server')
@client.command()
@commands.has_permissions(kick_members=True)
async def kick(ctx, member: discord.Member=None):
if not member:
await ctx.send('Please mention a member')
return
await member.kick()
await ctx.send(f'{member.display_name} was kicked from the server :wave:')
@client.command(aliases=['bc'])
async def broadcast(ctx, *, msg):
await ctx.send(msg)
await ctx.message.delete()
@client.command(aliases=['rbc'])
async def richbroadcast(ctx, *, msg):
embed=discord.Embed()
embed.title='Broadcast Message'
embed.description=msg
await ctx.send(embed=embed)
await ctx.message.delete()
@client.command()
async def help(ctx):
embed=discord.Embed()
embed.title='Help'
embed.description='Welcome to RedZone! Please read #rules.. Commands are: help, richbroadcast, broadcast, kick, ban, userinfo, settings. '
await ctx.send(embed=embed)
@client.command()
async def dates(ctx):
await ctx.send('Sent Dates in DMs!')
await ctx.author.send('No dates right now!')
@client.command()
async def ping(ctx):
await ctx.send('Pong! {0}'.format(round(client.latency, 1)))
@client.command()
async def userinfo(ctx, member: discord.Member):
roles = [role for role in member.roles]
embed = discord.Embed(colour=member.color, timestamp=ctx.message.created_at)
embed.set_author(name=f"User Info - {member}")
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url)
embed.add_field(name="ID:", value=member.id)
embed.add_field(name="Guild name:", value=member.display_name)
embed.add_field(name="Created at:", value=member.created_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
embed.add_field(name="Joined at:", value=member.joined_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
embed.add_field(name=f"Roles ({len(roles)})", value=" ".join([role.mention for role in roles]))
embed.add_field(name="Top role:", value=member.top_role.mention)
embed.add_field(name="Bot?", value=member.bot)
await ctx.send(embed=embed)
@client.command()
async def settings(ctx):
await ctx.send("Comming soon!")
client.run(os.getenv('TOKEN'))
|
[
"import discord, random\nfrom discord.ext import commands\nimport asyncio\nimport os\n\nclient=commands.Bot(command_prefix='\"')\nclient.remove_command('help')\n\[email protected]\nasync def on_ready():\n await client.change_presence(activity=discord.Game(name='\"help'))\n\n\[email protected]()\[email protected]_permissions(ban_members=True)\nasync def ban(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.ban()\n await ctx.send(f'{member.display_name} was banned from the server')\[email protected]()\[email protected]_permissions(kick_members=True)\nasync def kick(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.kick()\n await ctx.send(f'{member.display_name} was kicked from the server :wave:')\[email protected](aliases=['bc'])\nasync def broadcast(ctx, *, msg):\n await ctx.send(msg)\n await ctx.message.delete()\[email protected](aliases=['rbc'])\nasync def richbroadcast(ctx, *, msg):\n embed=discord.Embed()\n embed.title='Broadcast Message'\n embed.description=msg\n await ctx.send(embed=embed)\n await ctx.message.delete()\[email protected]()\nasync def help(ctx):\n embed=discord.Embed()\n embed.title='Help'\n embed.description='Welcome to RedZone! Please read #rules.. Commands are: help, richbroadcast, broadcast, kick, ban, userinfo, settings. '\n await ctx.send(embed=embed)\[email protected]()\nasync def dates(ctx):\n\n await ctx.send('Sent Dates in DMs!')\n await ctx.author.send('No dates right now!')\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('Pong! {0}'.format(round(client.latency, 1)))\n\[email protected]()\nasync def userinfo(ctx, member: discord.Member):\n \n roles = [role for role in member.roles]\n \n embed = discord.Embed(colour=member.color, timestamp=ctx.message.created_at)\n \n embed.set_author(name=f\"User Info - {member}\")\n embed.set_thumbnail(url=member.avatar_url)\n embed.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url)\n \n embed.add_field(name=\"ID:\", value=member.id)\n embed.add_field(name=\"Guild name:\", value=member.display_name)\n \n embed.add_field(name=\"Created at:\", value=member.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\"))\n embed.add_field(name=\"Joined at:\", value=member.joined_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\"))\n \n embed.add_field(name=f\"Roles ({len(roles)})\", value=\" \".join([role.mention for role in roles]))\n embed.add_field(name=\"Top role:\", value=member.top_role.mention)\n \n embed.add_field(name=\"Bot?\", value=member.bot)\n \n await ctx.send(embed=embed)\n\[email protected]()\nasync def settings(ctx):\n await ctx.send(\"Comming soon!\")\n\n\n\n\n\nclient.run(os.getenv('TOKEN'))\n",
"import discord, random\nfrom discord.ext import commands\nimport asyncio\nimport os\nclient = commands.Bot(command_prefix='\"')\nclient.remove_command('help')\n\n\[email protected]\nasync def on_ready():\n await client.change_presence(activity=discord.Game(name='\"help'))\n\n\[email protected]()\[email protected]_permissions(ban_members=True)\nasync def ban(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.ban()\n await ctx.send(f'{member.display_name} was banned from the server')\n\n\[email protected]()\[email protected]_permissions(kick_members=True)\nasync def kick(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.kick()\n await ctx.send(\n f'{member.display_name} was kicked from the server :wave:')\n\n\[email protected](aliases=['bc'])\nasync def broadcast(ctx, *, msg):\n await ctx.send(msg)\n await ctx.message.delete()\n\n\[email protected](aliases=['rbc'])\nasync def richbroadcast(ctx, *, msg):\n embed = discord.Embed()\n embed.title = 'Broadcast Message'\n embed.description = msg\n await ctx.send(embed=embed)\n await ctx.message.delete()\n\n\[email protected]()\nasync def help(ctx):\n embed = discord.Embed()\n embed.title = 'Help'\n embed.description = (\n 'Welcome to RedZone! Please read #rules.. Commands are: help, richbroadcast, broadcast, kick, ban, userinfo, settings. '\n )\n await ctx.send(embed=embed)\n\n\[email protected]()\nasync def dates(ctx):\n await ctx.send('Sent Dates in DMs!')\n await ctx.author.send('No dates right now!')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('Pong! {0}'.format(round(client.latency, 1)))\n\n\[email protected]()\nasync def userinfo(ctx, member: discord.Member):\n roles = [role for role in member.roles]\n embed = discord.Embed(colour=member.color, timestamp=ctx.message.created_at\n )\n embed.set_author(name=f'User Info - {member}')\n embed.set_thumbnail(url=member.avatar_url)\n embed.set_footer(text=f'Requested by {ctx.author}', icon_url=ctx.author\n .avatar_url)\n embed.add_field(name='ID:', value=member.id)\n embed.add_field(name='Guild name:', value=member.display_name)\n embed.add_field(name='Created at:', value=member.created_at.strftime(\n '%a, %#d %B %Y, %I:%M %p UTC'))\n embed.add_field(name='Joined at:', value=member.joined_at.strftime(\n '%a, %#d %B %Y, %I:%M %p UTC'))\n embed.add_field(name=f'Roles ({len(roles)})', value=' '.join([role.\n mention for role in roles]))\n embed.add_field(name='Top role:', value=member.top_role.mention)\n embed.add_field(name='Bot?', value=member.bot)\n await ctx.send(embed=embed)\n\n\[email protected]()\nasync def settings(ctx):\n await ctx.send('Comming soon!')\n\n\nclient.run(os.getenv('TOKEN'))\n",
"<import token>\nclient = commands.Bot(command_prefix='\"')\nclient.remove_command('help')\n\n\[email protected]\nasync def on_ready():\n await client.change_presence(activity=discord.Game(name='\"help'))\n\n\[email protected]()\[email protected]_permissions(ban_members=True)\nasync def ban(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.ban()\n await ctx.send(f'{member.display_name} was banned from the server')\n\n\[email protected]()\[email protected]_permissions(kick_members=True)\nasync def kick(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.kick()\n await ctx.send(\n f'{member.display_name} was kicked from the server :wave:')\n\n\[email protected](aliases=['bc'])\nasync def broadcast(ctx, *, msg):\n await ctx.send(msg)\n await ctx.message.delete()\n\n\[email protected](aliases=['rbc'])\nasync def richbroadcast(ctx, *, msg):\n embed = discord.Embed()\n embed.title = 'Broadcast Message'\n embed.description = msg\n await ctx.send(embed=embed)\n await ctx.message.delete()\n\n\[email protected]()\nasync def help(ctx):\n embed = discord.Embed()\n embed.title = 'Help'\n embed.description = (\n 'Welcome to RedZone! Please read #rules.. Commands are: help, richbroadcast, broadcast, kick, ban, userinfo, settings. '\n )\n await ctx.send(embed=embed)\n\n\[email protected]()\nasync def dates(ctx):\n await ctx.send('Sent Dates in DMs!')\n await ctx.author.send('No dates right now!')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('Pong! {0}'.format(round(client.latency, 1)))\n\n\[email protected]()\nasync def userinfo(ctx, member: discord.Member):\n roles = [role for role in member.roles]\n embed = discord.Embed(colour=member.color, timestamp=ctx.message.created_at\n )\n embed.set_author(name=f'User Info - {member}')\n embed.set_thumbnail(url=member.avatar_url)\n embed.set_footer(text=f'Requested by {ctx.author}', icon_url=ctx.author\n .avatar_url)\n embed.add_field(name='ID:', value=member.id)\n embed.add_field(name='Guild name:', value=member.display_name)\n embed.add_field(name='Created at:', value=member.created_at.strftime(\n '%a, %#d %B %Y, %I:%M %p UTC'))\n embed.add_field(name='Joined at:', value=member.joined_at.strftime(\n '%a, %#d %B %Y, %I:%M %p UTC'))\n embed.add_field(name=f'Roles ({len(roles)})', value=' '.join([role.\n mention for role in roles]))\n embed.add_field(name='Top role:', value=member.top_role.mention)\n embed.add_field(name='Bot?', value=member.bot)\n await ctx.send(embed=embed)\n\n\[email protected]()\nasync def settings(ctx):\n await ctx.send('Comming soon!')\n\n\nclient.run(os.getenv('TOKEN'))\n",
"<import token>\n<assignment token>\nclient.remove_command('help')\n\n\[email protected]\nasync def on_ready():\n await client.change_presence(activity=discord.Game(name='\"help'))\n\n\[email protected]()\[email protected]_permissions(ban_members=True)\nasync def ban(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.ban()\n await ctx.send(f'{member.display_name} was banned from the server')\n\n\[email protected]()\[email protected]_permissions(kick_members=True)\nasync def kick(ctx, member: discord.Member=None):\n if not member:\n await ctx.send('Please mention a member')\n return\n await member.kick()\n await ctx.send(\n f'{member.display_name} was kicked from the server :wave:')\n\n\[email protected](aliases=['bc'])\nasync def broadcast(ctx, *, msg):\n await ctx.send(msg)\n await ctx.message.delete()\n\n\[email protected](aliases=['rbc'])\nasync def richbroadcast(ctx, *, msg):\n embed = discord.Embed()\n embed.title = 'Broadcast Message'\n embed.description = msg\n await ctx.send(embed=embed)\n await ctx.message.delete()\n\n\[email protected]()\nasync def help(ctx):\n embed = discord.Embed()\n embed.title = 'Help'\n embed.description = (\n 'Welcome to RedZone! Please read #rules.. Commands are: help, richbroadcast, broadcast, kick, ban, userinfo, settings. '\n )\n await ctx.send(embed=embed)\n\n\[email protected]()\nasync def dates(ctx):\n await ctx.send('Sent Dates in DMs!')\n await ctx.author.send('No dates right now!')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('Pong! {0}'.format(round(client.latency, 1)))\n\n\[email protected]()\nasync def userinfo(ctx, member: discord.Member):\n roles = [role for role in member.roles]\n embed = discord.Embed(colour=member.color, timestamp=ctx.message.created_at\n )\n embed.set_author(name=f'User Info - {member}')\n embed.set_thumbnail(url=member.avatar_url)\n embed.set_footer(text=f'Requested by {ctx.author}', icon_url=ctx.author\n .avatar_url)\n embed.add_field(name='ID:', value=member.id)\n embed.add_field(name='Guild name:', value=member.display_name)\n embed.add_field(name='Created at:', value=member.created_at.strftime(\n '%a, %#d %B %Y, %I:%M %p UTC'))\n embed.add_field(name='Joined at:', value=member.joined_at.strftime(\n '%a, %#d %B %Y, %I:%M %p UTC'))\n embed.add_field(name=f'Roles ({len(roles)})', value=' '.join([role.\n mention for role in roles]))\n embed.add_field(name='Top role:', value=member.top_role.mention)\n embed.add_field(name='Bot?', value=member.bot)\n await ctx.send(embed=embed)\n\n\[email protected]()\nasync def settings(ctx):\n await ctx.send('Comming soon!')\n\n\nclient.run(os.getenv('TOKEN'))\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,609 |
bf64145ac557533e9390ff0b81b503f6a1a94696
|
import os
import constants as c
def setup_directories():
'''
Generate the app's structure
'''
os.makedirs(c.LAZY_BADGER_HOME, exist_ok=True)
os.makedirs(c.LAZY_BADGER_BIN, exist_ok=True)
def _get_app_home(app_name):
'''
Get the home directory from an app
args:
(str) app_name: Name of the app
'''
return os.path.join(c.LAZY_BADGER_HOME, app_name)
def _get_app_scripts(app_name):
'''
Get the scripts directory for an app
args:
(str) app_name: Name of the app
'''
return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)
def create_app_home_and_scripts(app_name):
'''
Generate a new app scripts folder
args:
(str) app_name: Name of the new app
'''
os.makedirs(_get_app_scripts(app_name), exist_ok=True)
def create_app_proxy_executable(app_name, app_path):
'''
Generate a new proxy executable for a new app
args:
(str) app_name: Name of the app
(str) app_path: Workdir of the app
'''
from stat import S_IXGRP
exec_path = os.path.join(c.LAZY_BADGER_BIN, app_name)
with open(exec_path, 'w') as app_proxy:
app_proxy.write(
c.APP_PROXY_SCRIPT.format(
workdir=app_path,
app_scripts_dir=_get_app_scripts(app_name)
)
)
os.chmod(exec_path, 0o755)
def uninstall_app(app_name):
'''
Uninstall an app
args:
(str) app_name: Name of the app
'''
from shutil import rmtree
rmtree(_get_app_home(app_name), ignore_errors=True)
os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))
|
[
"import os\nimport constants as c\n\ndef setup_directories():\n '''\n Generate the app's structure\n '''\n os.makedirs(c.LAZY_BADGER_HOME, exist_ok=True)\n os.makedirs(c.LAZY_BADGER_BIN, exist_ok=True)\n\n\ndef _get_app_home(app_name):\n '''\n Get the home directory from an app\n args:\n (str) app_name: Name of the app\n '''\n return os.path.join(c.LAZY_BADGER_HOME, app_name)\n\n\ndef _get_app_scripts(app_name):\n '''\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n '''\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\ndef create_app_home_and_scripts(app_name):\n '''\n Generate a new app scripts folder\n args:\n (str) app_name: Name of the new app\n '''\n os.makedirs(_get_app_scripts(app_name), exist_ok=True)\n\n\ndef create_app_proxy_executable(app_name, app_path):\n '''\n Generate a new proxy executable for a new app\n args:\n (str) app_name: Name of the app\n (str) app_path: Workdir of the app\n '''\n from stat import S_IXGRP\n exec_path = os.path.join(c.LAZY_BADGER_BIN, app_name)\n with open(exec_path, 'w') as app_proxy:\n app_proxy.write(\n c.APP_PROXY_SCRIPT.format(\n workdir=app_path,\n app_scripts_dir=_get_app_scripts(app_name)\n )\n )\n os.chmod(exec_path, 0o755)\n\n\ndef uninstall_app(app_name):\n '''\n Uninstall an app\n args:\n (str) app_name: Name of the app\n '''\n from shutil import rmtree\n rmtree(_get_app_home(app_name), ignore_errors=True)\n os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))\n",
"import os\nimport constants as c\n\n\ndef setup_directories():\n \"\"\"\n Generate the app's structure\n \"\"\"\n os.makedirs(c.LAZY_BADGER_HOME, exist_ok=True)\n os.makedirs(c.LAZY_BADGER_BIN, exist_ok=True)\n\n\ndef _get_app_home(app_name):\n \"\"\"\n Get the home directory from an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(c.LAZY_BADGER_HOME, app_name)\n\n\ndef _get_app_scripts(app_name):\n \"\"\"\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\ndef create_app_home_and_scripts(app_name):\n \"\"\"\n Generate a new app scripts folder\n args:\n (str) app_name: Name of the new app\n \"\"\"\n os.makedirs(_get_app_scripts(app_name), exist_ok=True)\n\n\ndef create_app_proxy_executable(app_name, app_path):\n \"\"\"\n Generate a new proxy executable for a new app\n args:\n (str) app_name: Name of the app\n (str) app_path: Workdir of the app\n \"\"\"\n from stat import S_IXGRP\n exec_path = os.path.join(c.LAZY_BADGER_BIN, app_name)\n with open(exec_path, 'w') as app_proxy:\n app_proxy.write(c.APP_PROXY_SCRIPT.format(workdir=app_path,\n app_scripts_dir=_get_app_scripts(app_name)))\n os.chmod(exec_path, 493)\n\n\ndef uninstall_app(app_name):\n \"\"\"\n Uninstall an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n from shutil import rmtree\n rmtree(_get_app_home(app_name), ignore_errors=True)\n os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))\n",
"<import token>\n\n\ndef setup_directories():\n \"\"\"\n Generate the app's structure\n \"\"\"\n os.makedirs(c.LAZY_BADGER_HOME, exist_ok=True)\n os.makedirs(c.LAZY_BADGER_BIN, exist_ok=True)\n\n\ndef _get_app_home(app_name):\n \"\"\"\n Get the home directory from an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(c.LAZY_BADGER_HOME, app_name)\n\n\ndef _get_app_scripts(app_name):\n \"\"\"\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\ndef create_app_home_and_scripts(app_name):\n \"\"\"\n Generate a new app scripts folder\n args:\n (str) app_name: Name of the new app\n \"\"\"\n os.makedirs(_get_app_scripts(app_name), exist_ok=True)\n\n\ndef create_app_proxy_executable(app_name, app_path):\n \"\"\"\n Generate a new proxy executable for a new app\n args:\n (str) app_name: Name of the app\n (str) app_path: Workdir of the app\n \"\"\"\n from stat import S_IXGRP\n exec_path = os.path.join(c.LAZY_BADGER_BIN, app_name)\n with open(exec_path, 'w') as app_proxy:\n app_proxy.write(c.APP_PROXY_SCRIPT.format(workdir=app_path,\n app_scripts_dir=_get_app_scripts(app_name)))\n os.chmod(exec_path, 493)\n\n\ndef uninstall_app(app_name):\n \"\"\"\n Uninstall an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n from shutil import rmtree\n rmtree(_get_app_home(app_name), ignore_errors=True)\n os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))\n",
"<import token>\n\n\ndef setup_directories():\n \"\"\"\n Generate the app's structure\n \"\"\"\n os.makedirs(c.LAZY_BADGER_HOME, exist_ok=True)\n os.makedirs(c.LAZY_BADGER_BIN, exist_ok=True)\n\n\ndef _get_app_home(app_name):\n \"\"\"\n Get the home directory from an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(c.LAZY_BADGER_HOME, app_name)\n\n\ndef _get_app_scripts(app_name):\n \"\"\"\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\ndef create_app_home_and_scripts(app_name):\n \"\"\"\n Generate a new app scripts folder\n args:\n (str) app_name: Name of the new app\n \"\"\"\n os.makedirs(_get_app_scripts(app_name), exist_ok=True)\n\n\n<function token>\n\n\ndef uninstall_app(app_name):\n \"\"\"\n Uninstall an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n from shutil import rmtree\n rmtree(_get_app_home(app_name), ignore_errors=True)\n os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))\n",
"<import token>\n\n\ndef setup_directories():\n \"\"\"\n Generate the app's structure\n \"\"\"\n os.makedirs(c.LAZY_BADGER_HOME, exist_ok=True)\n os.makedirs(c.LAZY_BADGER_BIN, exist_ok=True)\n\n\n<function token>\n\n\ndef _get_app_scripts(app_name):\n \"\"\"\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\ndef create_app_home_and_scripts(app_name):\n \"\"\"\n Generate a new app scripts folder\n args:\n (str) app_name: Name of the new app\n \"\"\"\n os.makedirs(_get_app_scripts(app_name), exist_ok=True)\n\n\n<function token>\n\n\ndef uninstall_app(app_name):\n \"\"\"\n Uninstall an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n from shutil import rmtree\n rmtree(_get_app_home(app_name), ignore_errors=True)\n os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))\n",
"<import token>\n<function token>\n<function token>\n\n\ndef _get_app_scripts(app_name):\n \"\"\"\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\ndef create_app_home_and_scripts(app_name):\n \"\"\"\n Generate a new app scripts folder\n args:\n (str) app_name: Name of the new app\n \"\"\"\n os.makedirs(_get_app_scripts(app_name), exist_ok=True)\n\n\n<function token>\n\n\ndef uninstall_app(app_name):\n \"\"\"\n Uninstall an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n from shutil import rmtree\n rmtree(_get_app_home(app_name), ignore_errors=True)\n os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))\n",
"<import token>\n<function token>\n<function token>\n\n\ndef _get_app_scripts(app_name):\n \"\"\"\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\n<function token>\n<function token>\n\n\ndef uninstall_app(app_name):\n \"\"\"\n Uninstall an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n from shutil import rmtree\n rmtree(_get_app_home(app_name), ignore_errors=True)\n os.remove(os.path.join(c.LAZY_BADGER_BIN, app_name))\n",
"<import token>\n<function token>\n<function token>\n\n\ndef _get_app_scripts(app_name):\n \"\"\"\n Get the scripts directory for an app\n args:\n (str) app_name: Name of the app\n \"\"\"\n return os.path.join(_get_app_home(app_name), c.APP_SCRIPTS)\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,610 |
84ab72e2c713f96d349d4dc40b126a903f0b0c98
|
import collections
from typing import List
#Runtime: 124 ms, faster than 5.88% of Python3 online submissions for Decompress Run-Length Encoded List.
#Memory Usage: 14 MB, less than 53.90% of Python3 online submissions for Decompress Run-Length Encoded List.
class Solution:
def decompressRLElist(self, nums: List[int]) -> List[int]:
list= []
for i in range(0, len(nums),2):
for j in range(nums[i]):
list += nums[i+1],
return list
s = Solution()
print(s.decompressRLElist([1,2,3,4]))
|
[
"import collections\nfrom typing import List\n\n#Runtime: 124 ms, faster than 5.88% of Python3 online submissions for Decompress Run-Length Encoded List.\n#Memory Usage: 14 MB, less than 53.90% of Python3 online submissions for Decompress Run-Length Encoded List.\n\nclass Solution:\n def decompressRLElist(self, nums: List[int]) -> List[int]:\n list= []\n for i in range(0, len(nums),2):\n for j in range(nums[i]):\n list += nums[i+1],\n return list\n\ns = Solution()\nprint(s.decompressRLElist([1,2,3,4]))",
"import collections\nfrom typing import List\n\n\nclass Solution:\n\n def decompressRLElist(self, nums: List[int]) ->List[int]:\n list = []\n for i in range(0, len(nums), 2):\n for j in range(nums[i]):\n list += nums[i + 1],\n return list\n\n\ns = Solution()\nprint(s.decompressRLElist([1, 2, 3, 4]))\n",
"<import token>\n\n\nclass Solution:\n\n def decompressRLElist(self, nums: List[int]) ->List[int]:\n list = []\n for i in range(0, len(nums), 2):\n for j in range(nums[i]):\n list += nums[i + 1],\n return list\n\n\ns = Solution()\nprint(s.decompressRLElist([1, 2, 3, 4]))\n",
"<import token>\n\n\nclass Solution:\n\n def decompressRLElist(self, nums: List[int]) ->List[int]:\n list = []\n for i in range(0, len(nums), 2):\n for j in range(nums[i]):\n list += nums[i + 1],\n return list\n\n\n<assignment token>\nprint(s.decompressRLElist([1, 2, 3, 4]))\n",
"<import token>\n\n\nclass Solution:\n\n def decompressRLElist(self, nums: List[int]) ->List[int]:\n list = []\n for i in range(0, len(nums), 2):\n for j in range(nums[i]):\n list += nums[i + 1],\n return list\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass Solution:\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
99,611 |
de26ee4005bcb5b61ff16e15ff2afba6e198738c
|
from django.apps import AppConfig
class MainscreenConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'mainscreen'
|
[
"from django.apps import AppConfig\n\n\nclass MainscreenConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'mainscreen'\n",
"<import token>\n\n\nclass MainscreenConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'mainscreen'\n",
"<import token>\n\n\nclass MainscreenConfig(AppConfig):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
99,612 |
1f823b530b5a6bba4726cdfb90b5695dcfb8e4d1
|
# -*- coding: utf-8 -*-
import scrapy
import json
from urllib import parse
from traffic_sz.chinadaily.chinadaily.settings import SEARCH_LIST
from traffic_sz.chinadaily.chinadaily.items import ChinadailyItem
class ChinadailySpider(scrapy.Spider):
name = 'ChinaDaily'
allowed_domains = ['newssearch.chinadaily.com.cn']
base_url = 'http://newssearch.chinadaily.com.cn/rest/cn/search?'
def start_requests(self):
for key_word in SEARCH_LIST:
data = {'fullMust': key_word, 'fullAny': key_word, 'sort': "dp", 'duplication': 'off', 'page': '0'}
url = self.base_url + parse.urlencode(data)
print('start url :', url)
yield scrapy.Request(url, method='GET', callback=self.parse)
def url_decode(self, url):
params = url.split('?')
if len(params) == 1:
return {}
params = params[-1]
params_data = {}
for item in params.split('&'):
params_data[item.split('=')[0]] = parse.unquote(item.split('=')[-1])
return params_data
def parse(self, response):
params_data = self.url_decode(response.url)
data = response.body.decode()
data = json.loads(data)
now_number = data.get('number')
total_pages = data.get('totalPages')
content_data = data.get('content')
for item in content_data:
items = ChinadailyItem()
items['inner_id'] = item.get('inner_id')
items['title'] = item.get('title')
items['source'] = item.get('source')
items['url'] = item.get('url')
items['content'] = item.get('plainText')
keywords = item.get('keywords')
items['key_word'] = json.dumps(keywords[: 5] if len(keywords) > 5 else keywords, ensure_ascii=False)
items['time_map'] = item.get('pubDateStr')
items['search_word'] = params_data.get('fullMust')
yield items
print(now_number, total_pages, params_data)
if now_number < total_pages:
params_data.update({'page': str(now_number + 1)})
next_url = self.base_url + parse.urlencode(params_data)
yield scrapy.Request(next_url, method='GET', callback=self.parse)
|
[
"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom urllib import parse\nfrom traffic_sz.chinadaily.chinadaily.settings import SEARCH_LIST\nfrom traffic_sz.chinadaily.chinadaily.items import ChinadailyItem\n\n\nclass ChinadailySpider(scrapy.Spider):\n name = 'ChinaDaily'\n allowed_domains = ['newssearch.chinadaily.com.cn']\n base_url = 'http://newssearch.chinadaily.com.cn/rest/cn/search?'\n\n def start_requests(self):\n for key_word in SEARCH_LIST:\n data = {'fullMust': key_word, 'fullAny': key_word, 'sort': \"dp\", 'duplication': 'off', 'page': '0'}\n url = self.base_url + parse.urlencode(data)\n print('start url :', url)\n yield scrapy.Request(url, method='GET', callback=self.parse)\n\n def url_decode(self, url):\n params = url.split('?')\n if len(params) == 1:\n return {}\n params = params[-1]\n params_data = {}\n for item in params.split('&'):\n params_data[item.split('=')[0]] = parse.unquote(item.split('=')[-1])\n return params_data\n\n def parse(self, response):\n params_data = self.url_decode(response.url)\n data = response.body.decode()\n data = json.loads(data)\n now_number = data.get('number')\n total_pages = data.get('totalPages')\n content_data = data.get('content')\n for item in content_data:\n items = ChinadailyItem()\n items['inner_id'] = item.get('inner_id')\n items['title'] = item.get('title')\n items['source'] = item.get('source')\n items['url'] = item.get('url')\n items['content'] = item.get('plainText')\n keywords = item.get('keywords')\n items['key_word'] = json.dumps(keywords[: 5] if len(keywords) > 5 else keywords, ensure_ascii=False)\n items['time_map'] = item.get('pubDateStr')\n items['search_word'] = params_data.get('fullMust')\n yield items\n print(now_number, total_pages, params_data)\n if now_number < total_pages:\n params_data.update({'page': str(now_number + 1)})\n next_url = self.base_url + parse.urlencode(params_data)\n yield scrapy.Request(next_url, method='GET', callback=self.parse)\n",
"import scrapy\nimport json\nfrom urllib import parse\nfrom traffic_sz.chinadaily.chinadaily.settings import SEARCH_LIST\nfrom traffic_sz.chinadaily.chinadaily.items import ChinadailyItem\n\n\nclass ChinadailySpider(scrapy.Spider):\n name = 'ChinaDaily'\n allowed_domains = ['newssearch.chinadaily.com.cn']\n base_url = 'http://newssearch.chinadaily.com.cn/rest/cn/search?'\n\n def start_requests(self):\n for key_word in SEARCH_LIST:\n data = {'fullMust': key_word, 'fullAny': key_word, 'sort': 'dp',\n 'duplication': 'off', 'page': '0'}\n url = self.base_url + parse.urlencode(data)\n print('start url :', url)\n yield scrapy.Request(url, method='GET', callback=self.parse)\n\n def url_decode(self, url):\n params = url.split('?')\n if len(params) == 1:\n return {}\n params = params[-1]\n params_data = {}\n for item in params.split('&'):\n params_data[item.split('=')[0]] = parse.unquote(item.split('=')[-1]\n )\n return params_data\n\n def parse(self, response):\n params_data = self.url_decode(response.url)\n data = response.body.decode()\n data = json.loads(data)\n now_number = data.get('number')\n total_pages = data.get('totalPages')\n content_data = data.get('content')\n for item in content_data:\n items = ChinadailyItem()\n items['inner_id'] = item.get('inner_id')\n items['title'] = item.get('title')\n items['source'] = item.get('source')\n items['url'] = item.get('url')\n items['content'] = item.get('plainText')\n keywords = item.get('keywords')\n items['key_word'] = json.dumps(keywords[:5] if len(keywords) > \n 5 else keywords, ensure_ascii=False)\n items['time_map'] = item.get('pubDateStr')\n items['search_word'] = params_data.get('fullMust')\n yield items\n print(now_number, total_pages, params_data)\n if now_number < total_pages:\n params_data.update({'page': str(now_number + 1)})\n next_url = self.base_url + parse.urlencode(params_data)\n yield scrapy.Request(next_url, method='GET', callback=self.parse)\n",
"<import token>\n\n\nclass ChinadailySpider(scrapy.Spider):\n name = 'ChinaDaily'\n allowed_domains = ['newssearch.chinadaily.com.cn']\n base_url = 'http://newssearch.chinadaily.com.cn/rest/cn/search?'\n\n def start_requests(self):\n for key_word in SEARCH_LIST:\n data = {'fullMust': key_word, 'fullAny': key_word, 'sort': 'dp',\n 'duplication': 'off', 'page': '0'}\n url = self.base_url + parse.urlencode(data)\n print('start url :', url)\n yield scrapy.Request(url, method='GET', callback=self.parse)\n\n def url_decode(self, url):\n params = url.split('?')\n if len(params) == 1:\n return {}\n params = params[-1]\n params_data = {}\n for item in params.split('&'):\n params_data[item.split('=')[0]] = parse.unquote(item.split('=')[-1]\n )\n return params_data\n\n def parse(self, response):\n params_data = self.url_decode(response.url)\n data = response.body.decode()\n data = json.loads(data)\n now_number = data.get('number')\n total_pages = data.get('totalPages')\n content_data = data.get('content')\n for item in content_data:\n items = ChinadailyItem()\n items['inner_id'] = item.get('inner_id')\n items['title'] = item.get('title')\n items['source'] = item.get('source')\n items['url'] = item.get('url')\n items['content'] = item.get('plainText')\n keywords = item.get('keywords')\n items['key_word'] = json.dumps(keywords[:5] if len(keywords) > \n 5 else keywords, ensure_ascii=False)\n items['time_map'] = item.get('pubDateStr')\n items['search_word'] = params_data.get('fullMust')\n yield items\n print(now_number, total_pages, params_data)\n if now_number < total_pages:\n params_data.update({'page': str(now_number + 1)})\n next_url = self.base_url + parse.urlencode(params_data)\n yield scrapy.Request(next_url, method='GET', callback=self.parse)\n",
"<import token>\n\n\nclass ChinadailySpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def start_requests(self):\n for key_word in SEARCH_LIST:\n data = {'fullMust': key_word, 'fullAny': key_word, 'sort': 'dp',\n 'duplication': 'off', 'page': '0'}\n url = self.base_url + parse.urlencode(data)\n print('start url :', url)\n yield scrapy.Request(url, method='GET', callback=self.parse)\n\n def url_decode(self, url):\n params = url.split('?')\n if len(params) == 1:\n return {}\n params = params[-1]\n params_data = {}\n for item in params.split('&'):\n params_data[item.split('=')[0]] = parse.unquote(item.split('=')[-1]\n )\n return params_data\n\n def parse(self, response):\n params_data = self.url_decode(response.url)\n data = response.body.decode()\n data = json.loads(data)\n now_number = data.get('number')\n total_pages = data.get('totalPages')\n content_data = data.get('content')\n for item in content_data:\n items = ChinadailyItem()\n items['inner_id'] = item.get('inner_id')\n items['title'] = item.get('title')\n items['source'] = item.get('source')\n items['url'] = item.get('url')\n items['content'] = item.get('plainText')\n keywords = item.get('keywords')\n items['key_word'] = json.dumps(keywords[:5] if len(keywords) > \n 5 else keywords, ensure_ascii=False)\n items['time_map'] = item.get('pubDateStr')\n items['search_word'] = params_data.get('fullMust')\n yield items\n print(now_number, total_pages, params_data)\n if now_number < total_pages:\n params_data.update({'page': str(now_number + 1)})\n next_url = self.base_url + parse.urlencode(params_data)\n yield scrapy.Request(next_url, method='GET', callback=self.parse)\n",
"<import token>\n\n\nclass ChinadailySpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def url_decode(self, url):\n params = url.split('?')\n if len(params) == 1:\n return {}\n params = params[-1]\n params_data = {}\n for item in params.split('&'):\n params_data[item.split('=')[0]] = parse.unquote(item.split('=')[-1]\n )\n return params_data\n\n def parse(self, response):\n params_data = self.url_decode(response.url)\n data = response.body.decode()\n data = json.loads(data)\n now_number = data.get('number')\n total_pages = data.get('totalPages')\n content_data = data.get('content')\n for item in content_data:\n items = ChinadailyItem()\n items['inner_id'] = item.get('inner_id')\n items['title'] = item.get('title')\n items['source'] = item.get('source')\n items['url'] = item.get('url')\n items['content'] = item.get('plainText')\n keywords = item.get('keywords')\n items['key_word'] = json.dumps(keywords[:5] if len(keywords) > \n 5 else keywords, ensure_ascii=False)\n items['time_map'] = item.get('pubDateStr')\n items['search_word'] = params_data.get('fullMust')\n yield items\n print(now_number, total_pages, params_data)\n if now_number < total_pages:\n params_data.update({'page': str(now_number + 1)})\n next_url = self.base_url + parse.urlencode(params_data)\n yield scrapy.Request(next_url, method='GET', callback=self.parse)\n",
"<import token>\n\n\nclass ChinadailySpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def url_decode(self, url):\n params = url.split('?')\n if len(params) == 1:\n return {}\n params = params[-1]\n params_data = {}\n for item in params.split('&'):\n params_data[item.split('=')[0]] = parse.unquote(item.split('=')[-1]\n )\n return params_data\n <function token>\n",
"<import token>\n\n\nclass ChinadailySpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,613 |
17d8d53697679f3aaf311be70ba54feac5b142c5
|
# Create entries into the table.
from models import db, Puppy, Owner, Toy
# Create two puppies
rufus = Puppy('Rufus')
fido = Puppy('Fido')
db.session.add_all([rufus, fido])
db.session.commit()
# Check
print(Puppy.query.all())
rufus = Puppy.query.filter_by(name='Rufus').first()
# Create owner object
jose = Owner('Jose', rufus.id)
# Give toy to Rufus.
toy1 = Toy('Chew Toy', rufus.id)
toy2 = Toy('Ball', rufus.id)
db.session.add_all([jose, toy1, toy2])
db.session.commit()
# Grap Rufus after those additions.
rufus = Puppy.query.filter_by(name="Rufus").first()
print(rufus)
rufus.report_toys()
|
[
"# Create entries into the table.\nfrom models import db, Puppy, Owner, Toy\n\n# Create two puppies\nrufus = Puppy('Rufus')\nfido = Puppy('Fido')\n\ndb.session.add_all([rufus, fido])\ndb.session.commit()\n\n# Check\nprint(Puppy.query.all())\n\nrufus = Puppy.query.filter_by(name='Rufus').first()\n\n# Create owner object\njose = Owner('Jose', rufus.id)\n\n# Give toy to Rufus.\ntoy1 = Toy('Chew Toy', rufus.id)\ntoy2 = Toy('Ball', rufus.id)\n\ndb.session.add_all([jose, toy1, toy2])\ndb.session.commit()\n\n# Grap Rufus after those additions.\nrufus = Puppy.query.filter_by(name=\"Rufus\").first()\nprint(rufus)\nrufus.report_toys()\n",
"from models import db, Puppy, Owner, Toy\nrufus = Puppy('Rufus')\nfido = Puppy('Fido')\ndb.session.add_all([rufus, fido])\ndb.session.commit()\nprint(Puppy.query.all())\nrufus = Puppy.query.filter_by(name='Rufus').first()\njose = Owner('Jose', rufus.id)\ntoy1 = Toy('Chew Toy', rufus.id)\ntoy2 = Toy('Ball', rufus.id)\ndb.session.add_all([jose, toy1, toy2])\ndb.session.commit()\nrufus = Puppy.query.filter_by(name='Rufus').first()\nprint(rufus)\nrufus.report_toys()\n",
"<import token>\nrufus = Puppy('Rufus')\nfido = Puppy('Fido')\ndb.session.add_all([rufus, fido])\ndb.session.commit()\nprint(Puppy.query.all())\nrufus = Puppy.query.filter_by(name='Rufus').first()\njose = Owner('Jose', rufus.id)\ntoy1 = Toy('Chew Toy', rufus.id)\ntoy2 = Toy('Ball', rufus.id)\ndb.session.add_all([jose, toy1, toy2])\ndb.session.commit()\nrufus = Puppy.query.filter_by(name='Rufus').first()\nprint(rufus)\nrufus.report_toys()\n",
"<import token>\n<assignment token>\ndb.session.add_all([rufus, fido])\ndb.session.commit()\nprint(Puppy.query.all())\n<assignment token>\ndb.session.add_all([jose, toy1, toy2])\ndb.session.commit()\n<assignment token>\nprint(rufus)\nrufus.report_toys()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,614 |
a98baa84feb5d011585771cf6c249b8081b01f1e
|
from django.shortcuts import render
from .models import *
from django.core.exceptions import ObjectDoesNotExist
import json
from django.http import HttpResponseServerError, JsonResponse
import datetime
from django.contrib import messages
# Create your views here.
def store(request):
product = Product.objects.all()
if request.user.is_authenticated:
try:
customer = request.user.customer
order, created = Order.objects.get_or_create(
customer=customer, complete=False)
items = order.orderitem_set.all()
cartItem = order.get_cart_items
except ObjectDoesNotExist:
customer = Customer.objects.create(user=request.user,
first_name=request.user.first_name,
last_name=request.user.last_name,
email=request.user.email)
order, created = Order.objects.get_or_create(
customer=customer, complete=False)
items = order.orderitem_set.all()
cartItem = order.get_cart_items
else:
items = []
order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}
cartItem = order['get_cart_items']
context = {
'products': product,
'cartItem': cartItem
}
return render(request, 'store/store.html', context)
def cart(request):
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(
customer=customer, complete=False)
items = order.orderitem_set.all()
cartItem = order.get_cart_items
else:
items = []
order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}
cartItem = order['get_cart_items']
context = {
'items': items,
'orders': order,
'cartItem': cartItem
}
return render(request, 'store/cart.html', context)
def checkout(request):
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(
customer=customer, complete=False)
items = order.orderitem_set.all()
cartItem = order.get_cart_items
try:
address = CustomerAddress.objects.get(
customer=customer, is_default=True)
except ObjectDoesNotExist:
address = []
messages.add_message(request, messages.INFO,
'Silakan tambahkan alamat untuk melanjutkan')
else:
items = []
address = []
order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}
cartItem = order['get_cart_items']
context = {
'items': items,
'orders': order,
'cartItem': cartItem,
'address': address
}
return render(request, 'store/checkout.html', context)
def updateItem(request):
data = json.loads(request.body)
productId = data['productId']
action = data['action']
customer = request.user.customer
product = Product.objects.get(id=productId)
order, create = Order.objects.get_or_create(
customer=customer, complete=False)
orderItem, created = OrderItem.objects.get_or_create(
order=order, product=product)
if action == 'add':
orderItem.quantity += 1
elif action == 'remove':
orderItem.quantity -= 1
orderItem.save()
if orderItem.quantity <= 0:
orderItem.delete()
return JsonResponse("Item added", safe=False)
def processOrder(request):
data = json.loads(request.body)
if request.user.is_authenticated:
customer = request.user.customer
transaction_id = datetime.datetime.now().timestamp()
order, create = Order.objects.get_or_create(
customer=customer, complete=False)
total = float(data['price']['total'])
order.transaction_id = transaction_id
if total == order.get_cart_total:
order.complete = True
order.save()
ship = data['shipping']
if (order.shipping):
customer_address = CustomerAddress.objects.get(
id=data['address_id']['id'])
shipping = ShippingAddress.objects.create(
customer_address=customer_address, order=order)
Pesanan.objects.create(
customer=customer, order=order, shipping=shipping)
return JsonResponse("Ordered", safe=False)
def viewProducts(request, id):
product = Product.objects.get(id=id)
if request.user.is_authenticated:
try:
customer = request.user.customer
order, created = Order.objects.get_or_create(
customer=customer, complete=False)
cartItem = order.get_cart_items
except ObjectDoesNotExist:
customer = Customer.objects.create(user=request.user,
first_name=request.user.first_name,
last_name=request.user.last_name,
email=request.user.email)
order, created = Order.objects.get_or_create(
customer=customer, complete=False)
cartItem = order.get_cart_items
else:
cartItem = 0
context = {
'product': product,
'cartItem': cartItem
}
return render(request, 'store/view-product.html', context)
|
[
"from django.shortcuts import render\nfrom .models import *\nfrom django.core.exceptions import ObjectDoesNotExist\nimport json\nfrom django.http import HttpResponseServerError, JsonResponse\nimport datetime\nfrom django.contrib import messages\n# Create your views here.\n\n\ndef store(request):\n product = Product.objects.all()\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(\n customer=customer, complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name,\n last_name=request.user.last_name,\n email=request.user.email)\n order, created = Order.objects.get_or_create(\n customer=customer, complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {\n 'products': product,\n 'cartItem': cartItem\n }\n return render(request, 'store/store.html', context)\n\n\ndef cart(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(\n customer=customer, complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {\n 'items': items,\n 'orders': order,\n 'cartItem': cartItem\n }\n return render(request, 'store/cart.html', context)\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(\n customer=customer, complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(\n customer=customer, is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {\n 'items': items,\n 'orders': order,\n 'cartItem': cartItem,\n 'address': address\n }\n return render(request, 'store/checkout.html', context)\n\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n\n customer = request.user.customer\n product = Product.objects.get(id=productId)\n order, create = Order.objects.get_or_create(\n customer=customer, complete=False)\n\n orderItem, created = OrderItem.objects.get_or_create(\n order=order, product=product)\n\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save()\n\n if orderItem.quantity <= 0:\n orderItem.delete()\n return JsonResponse(\"Item added\", safe=False)\n\n\ndef processOrder(request):\n data = json.loads(request.body)\n\n if request.user.is_authenticated:\n customer = request.user.customer\n transaction_id = datetime.datetime.now().timestamp()\n order, create = Order.objects.get_or_create(\n customer=customer, complete=False)\n total = float(data['price']['total'])\n order.transaction_id = transaction_id\n\n if total == order.get_cart_total:\n order.complete = True\n order.save()\n\n ship = data['shipping']\n if (order.shipping):\n customer_address = CustomerAddress.objects.get(\n id=data['address_id']['id'])\n shipping = ShippingAddress.objects.create(\n customer_address=customer_address, order=order)\n Pesanan.objects.create(\n customer=customer, order=order, shipping=shipping)\n\n return JsonResponse(\"Ordered\", safe=False)\n\n\ndef viewProducts(request, id):\n product = Product.objects.get(id=id)\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(\n customer=customer, complete=False)\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name,\n last_name=request.user.last_name,\n email=request.user.email)\n order, created = Order.objects.get_or_create(\n customer=customer, complete=False)\n cartItem = order.get_cart_items\n\n else:\n cartItem = 0\n context = {\n 'product': product,\n 'cartItem': cartItem\n }\n return render(request, 'store/view-product.html', context)\n",
"from django.shortcuts import render\nfrom .models import *\nfrom django.core.exceptions import ObjectDoesNotExist\nimport json\nfrom django.http import HttpResponseServerError, JsonResponse\nimport datetime\nfrom django.contrib import messages\n\n\ndef store(request):\n product = Product.objects.all()\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'products': product, 'cartItem': cartItem}\n return render(request, 'store/store.html', context)\n\n\ndef cart(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem}\n return render(request, 'store/cart.html', context)\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(customer=customer,\n is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem,\n 'address': address}\n return render(request, 'store/checkout.html', context)\n\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n customer = request.user.customer\n product = Product.objects.get(id=productId)\n order, create = Order.objects.get_or_create(customer=customer, complete\n =False)\n orderItem, created = OrderItem.objects.get_or_create(order=order,\n product=product)\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save()\n if orderItem.quantity <= 0:\n orderItem.delete()\n return JsonResponse('Item added', safe=False)\n\n\ndef processOrder(request):\n data = json.loads(request.body)\n if request.user.is_authenticated:\n customer = request.user.customer\n transaction_id = datetime.datetime.now().timestamp()\n order, create = Order.objects.get_or_create(customer=customer,\n complete=False)\n total = float(data['price']['total'])\n order.transaction_id = transaction_id\n if total == order.get_cart_total:\n order.complete = True\n order.save()\n ship = data['shipping']\n if order.shipping:\n customer_address = CustomerAddress.objects.get(id=data[\n 'address_id']['id'])\n shipping = ShippingAddress.objects.create(customer_address=\n customer_address, order=order)\n Pesanan.objects.create(customer=customer, order=order, shipping\n =shipping)\n return JsonResponse('Ordered', safe=False)\n\n\ndef viewProducts(request, id):\n product = Product.objects.get(id=id)\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n else:\n cartItem = 0\n context = {'product': product, 'cartItem': cartItem}\n return render(request, 'store/view-product.html', context)\n",
"<import token>\n\n\ndef store(request):\n product = Product.objects.all()\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'products': product, 'cartItem': cartItem}\n return render(request, 'store/store.html', context)\n\n\ndef cart(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem}\n return render(request, 'store/cart.html', context)\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(customer=customer,\n is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem,\n 'address': address}\n return render(request, 'store/checkout.html', context)\n\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n customer = request.user.customer\n product = Product.objects.get(id=productId)\n order, create = Order.objects.get_or_create(customer=customer, complete\n =False)\n orderItem, created = OrderItem.objects.get_or_create(order=order,\n product=product)\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save()\n if orderItem.quantity <= 0:\n orderItem.delete()\n return JsonResponse('Item added', safe=False)\n\n\ndef processOrder(request):\n data = json.loads(request.body)\n if request.user.is_authenticated:\n customer = request.user.customer\n transaction_id = datetime.datetime.now().timestamp()\n order, create = Order.objects.get_or_create(customer=customer,\n complete=False)\n total = float(data['price']['total'])\n order.transaction_id = transaction_id\n if total == order.get_cart_total:\n order.complete = True\n order.save()\n ship = data['shipping']\n if order.shipping:\n customer_address = CustomerAddress.objects.get(id=data[\n 'address_id']['id'])\n shipping = ShippingAddress.objects.create(customer_address=\n customer_address, order=order)\n Pesanan.objects.create(customer=customer, order=order, shipping\n =shipping)\n return JsonResponse('Ordered', safe=False)\n\n\ndef viewProducts(request, id):\n product = Product.objects.get(id=id)\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n else:\n cartItem = 0\n context = {'product': product, 'cartItem': cartItem}\n return render(request, 'store/view-product.html', context)\n",
"<import token>\n\n\ndef store(request):\n product = Product.objects.all()\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'products': product, 'cartItem': cartItem}\n return render(request, 'store/store.html', context)\n\n\n<function token>\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(customer=customer,\n is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem,\n 'address': address}\n return render(request, 'store/checkout.html', context)\n\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n customer = request.user.customer\n product = Product.objects.get(id=productId)\n order, create = Order.objects.get_or_create(customer=customer, complete\n =False)\n orderItem, created = OrderItem.objects.get_or_create(order=order,\n product=product)\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save()\n if orderItem.quantity <= 0:\n orderItem.delete()\n return JsonResponse('Item added', safe=False)\n\n\ndef processOrder(request):\n data = json.loads(request.body)\n if request.user.is_authenticated:\n customer = request.user.customer\n transaction_id = datetime.datetime.now().timestamp()\n order, create = Order.objects.get_or_create(customer=customer,\n complete=False)\n total = float(data['price']['total'])\n order.transaction_id = transaction_id\n if total == order.get_cart_total:\n order.complete = True\n order.save()\n ship = data['shipping']\n if order.shipping:\n customer_address = CustomerAddress.objects.get(id=data[\n 'address_id']['id'])\n shipping = ShippingAddress.objects.create(customer_address=\n customer_address, order=order)\n Pesanan.objects.create(customer=customer, order=order, shipping\n =shipping)\n return JsonResponse('Ordered', safe=False)\n\n\ndef viewProducts(request, id):\n product = Product.objects.get(id=id)\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n else:\n cartItem = 0\n context = {'product': product, 'cartItem': cartItem}\n return render(request, 'store/view-product.html', context)\n",
"<import token>\n\n\ndef store(request):\n product = Product.objects.all()\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'products': product, 'cartItem': cartItem}\n return render(request, 'store/store.html', context)\n\n\n<function token>\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(customer=customer,\n is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem,\n 'address': address}\n return render(request, 'store/checkout.html', context)\n\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n customer = request.user.customer\n product = Product.objects.get(id=productId)\n order, create = Order.objects.get_or_create(customer=customer, complete\n =False)\n orderItem, created = OrderItem.objects.get_or_create(order=order,\n product=product)\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save()\n if orderItem.quantity <= 0:\n orderItem.delete()\n return JsonResponse('Item added', safe=False)\n\n\n<function token>\n\n\ndef viewProducts(request, id):\n product = Product.objects.get(id=id)\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n else:\n cartItem = 0\n context = {'product': product, 'cartItem': cartItem}\n return render(request, 'store/view-product.html', context)\n",
"<import token>\n\n\ndef store(request):\n product = Product.objects.all()\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'products': product, 'cartItem': cartItem}\n return render(request, 'store/store.html', context)\n\n\n<function token>\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(customer=customer,\n is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem,\n 'address': address}\n return render(request, 'store/checkout.html', context)\n\n\n<function token>\n<function token>\n\n\ndef viewProducts(request, id):\n product = Product.objects.get(id=id)\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n cartItem = order.get_cart_items\n else:\n cartItem = 0\n context = {'product': product, 'cartItem': cartItem}\n return render(request, 'store/view-product.html', context)\n",
"<import token>\n\n\ndef store(request):\n product = Product.objects.all()\n if request.user.is_authenticated:\n try:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n except ObjectDoesNotExist:\n customer = Customer.objects.create(user=request.user,\n first_name=request.user.first_name, last_name=request.user.\n last_name, email=request.user.email)\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n else:\n items = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'products': product, 'cartItem': cartItem}\n return render(request, 'store/store.html', context)\n\n\n<function token>\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(customer=customer,\n is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem,\n 'address': address}\n return render(request, 'store/checkout.html', context)\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n customer = request.user.customer\n order, created = Order.objects.get_or_create(customer=customer,\n complete=False)\n items = order.orderitem_set.all()\n cartItem = order.get_cart_items\n try:\n address = CustomerAddress.objects.get(customer=customer,\n is_default=True)\n except ObjectDoesNotExist:\n address = []\n messages.add_message(request, messages.INFO,\n 'Silakan tambahkan alamat untuk melanjutkan')\n else:\n items = []\n address = []\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cartItem = order['get_cart_items']\n context = {'items': items, 'orders': order, 'cartItem': cartItem,\n 'address': address}\n return render(request, 'store/checkout.html', context)\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,615 |
ed3566d4c33a7504d2b0b532c25703e79ccc5700
|
import random
import json
from web3 import Web3
# import json
MAIN_URL = "https://mainnet.infura.io/v3/bb055071bba745488eda95512a6d0035"
URL = 'https://8cf41633363c49a584fbfb0b556a5927.ropsten.rpc.rivet.cloud/'
URL = 'wss://ropsten.infura.io/ws/v3/bb055071bba745488eda95512a6d0035'
w3 = Web3(Web3.WebsocketProvider(URL))
# w3 = Web3(Web3.HTTPProvider(URL))
def _checking(_addr):
'''
ورودی تابع ک استرینگ است که چک میشمود ایا ادرس معبتری هست یا خیر
false یا addrress درنهایت
خارح میشود
'''
if not isinstance(_addr, str):
print("ادرس بد وارد کردی باید یک استرینگ باشه")
return False
try:
if not w3.isConnected():
print("نت مشکل داره ")
return False
addr_ = Web3.toChecksumAddress(_addr)
if not _addr:
print("ادرس بدی وارد کردی شرمنده تم")
return False
return addr_
except Exception as e:
print(e)
print("یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه")
return False
def balance(_addr: str) -> float:
"""
اینجا ادرس خواسته رو به تابع بدید
توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)
"""
addr_ = _checking(_addr)
return float(w3.eth.get_balance(addr_) / 10**18)
def transfer(_to_addr: str, _value: float, private_key: str, public_key: str ,_nounce:int ):
to_addr_ = _checking(_to_addr)
public_key = _checking(public_key)
if to_addr_ and public_key:
try:
if balance(public_key) < _value:
print("پول ت کمه ، نمیتونی کمک کنی ")
return False
p = w3.eth.gas_price
trancation = {
'from': public_key,
'to': to_addr_,
"gas": "0x200000",
"gasPrice": p,
"nonce": _nounce,
"value": int(_value * 10**18),
}
raw_trx = w3.eth.account.privateKeyToAccount(
private_key).sign_transaction(trancation)
res = w3.eth.send_raw_transaction(raw_trx.rawTransaction).hex()
return res
except Exception as e:
print(e)
print("یک اتفاقی افتاده که من نمیدونم ....")
return 0
## Testing Functions with my wallet
_public_key = Web3.toChecksumAddress("0xAf77fB90baCE88edad8be674232C4a072BdC29A3")
print (balance("0xAf77fB90baCE88edad8be674232C4a072BdC29A3"))
print(balance("0xAf77fB90baCE88edad8be674232C4a072BdC29A3"))
_nounce = w3.eth.get_transaction_count(_public_key)
print (
transfer("0x603c7564035A8c0a7eB9a74a76113563ffdbD36F" ,
0.01,
"a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b",
"0xAf77fB90baCE88edad8be674232C4a072BdC29A3",
_nounce)
)
_nounce += 1
print (
transfer("0x603c7564035A8c0a7eB9a74a76113563ffdbD36F" ,
0.01,
"a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b",
"0xAf77fB90baCE88edad8be674232C4a072BdC29A3",
_nounce)
)
|
[
"import random\nimport json\nfrom web3 import Web3\n# import json\n\nMAIN_URL = \"https://mainnet.infura.io/v3/bb055071bba745488eda95512a6d0035\"\nURL = 'https://8cf41633363c49a584fbfb0b556a5927.ropsten.rpc.rivet.cloud/'\nURL = 'wss://ropsten.infura.io/ws/v3/bb055071bba745488eda95512a6d0035'\n\nw3 = Web3(Web3.WebsocketProvider(URL))\n# w3 = Web3(Web3.HTTPProvider(URL))\n\n\ndef _checking(_addr):\n '''\n ورودی تابع ک استرینگ است که چک میشمود ایا ادرس معبتری هست یا خیر\n\n false یا addrress درنهایت\n خارح میشود\n '''\n if not isinstance(_addr, str):\n print(\"ادرس بد وارد کردی باید یک استرینگ باشه\")\n return False\n try:\n if not w3.isConnected():\n print(\"نت مشکل داره \")\n return False\n addr_ = Web3.toChecksumAddress(_addr)\n if not _addr:\n print(\"ادرس بدی وارد کردی شرمنده تم\")\n return False\n return addr_\n except Exception as e:\n print(e)\n print(\"یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه\")\n return False\n\n\ndef balance(_addr: str) -> float:\n \"\"\"\n اینجا ادرس خواسته رو به تابع بدید\n توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10**18)\n\n\n\ndef transfer(_to_addr: str, _value: float, private_key: str, public_key: str ,_nounce:int ):\n to_addr_ = _checking(_to_addr)\n public_key = _checking(public_key)\n \n if to_addr_ and public_key:\n try:\n if balance(public_key) < _value:\n print(\"پول ت کمه ، نمیتونی کمک کنی \")\n return False\n p = w3.eth.gas_price\n \n trancation = {\n 'from': public_key,\n 'to': to_addr_,\n \"gas\": \"0x200000\",\n \"gasPrice\": p,\n \"nonce\": _nounce,\n \"value\": int(_value * 10**18),\n }\n raw_trx = w3.eth.account.privateKeyToAccount(\n private_key).sign_transaction(trancation)\n res = w3.eth.send_raw_transaction(raw_trx.rawTransaction).hex()\n return res\n except Exception as e:\n print(e)\n print(\"یک اتفاقی افتاده که من نمیدونم ....\")\n return 0\n\n\n## Testing Functions with my wallet\n\n_public_key = Web3.toChecksumAddress(\"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\")\n\nprint (balance(\"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\"))\n\n\nprint(balance(\"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\"))\n\n\n_nounce = w3.eth.get_transaction_count(_public_key)\nprint (\n transfer(\"0x603c7564035A8c0a7eB9a74a76113563ffdbD36F\" ,\n 0.01,\n \"a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b\",\n \"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\",\n _nounce)\n )\n_nounce += 1\n\nprint (\n transfer(\"0x603c7564035A8c0a7eB9a74a76113563ffdbD36F\" ,\n 0.01,\n \"a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b\",\n \"0xAf77fB90baCE88edad8be674232C4a072BdC29A3\",\n _nounce)\n )\n\n",
"import random\nimport json\nfrom web3 import Web3\nMAIN_URL = 'https://mainnet.infura.io/v3/bb055071bba745488eda95512a6d0035'\nURL = 'https://8cf41633363c49a584fbfb0b556a5927.ropsten.rpc.rivet.cloud/'\nURL = 'wss://ropsten.infura.io/ws/v3/bb055071bba745488eda95512a6d0035'\nw3 = Web3(Web3.WebsocketProvider(URL))\n\n\ndef _checking(_addr):\n \"\"\"\n ورودی تابع ک استرینگ است که چک میشمود ایا ادرس معبتری هست یا خیر\n\n false یا addrress درنهایت\n خارح میشود\n \"\"\"\n if not isinstance(_addr, str):\n print('ادرس بد وارد کردی باید یک استرینگ باشه')\n return False\n try:\n if not w3.isConnected():\n print('نت مشکل داره ')\n return False\n addr_ = Web3.toChecksumAddress(_addr)\n if not _addr:\n print('ادرس بدی وارد کردی شرمنده تم')\n return False\n return addr_\n except Exception as e:\n print(e)\n print('یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه')\n return False\n\n\ndef balance(_addr: str) ->float:\n \"\"\"\n اینجا ادرس خواسته رو به تابع بدید\n توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10 ** 18)\n\n\ndef transfer(_to_addr: str, _value: float, private_key: str, public_key:\n str, _nounce: int):\n to_addr_ = _checking(_to_addr)\n public_key = _checking(public_key)\n if to_addr_ and public_key:\n try:\n if balance(public_key) < _value:\n print('پول ت کمه ، نمیتونی کمک کنی ')\n return False\n p = w3.eth.gas_price\n trancation = {'from': public_key, 'to': to_addr_, 'gas':\n '0x200000', 'gasPrice': p, 'nonce': _nounce, 'value': int(\n _value * 10 ** 18)}\n raw_trx = w3.eth.account.privateKeyToAccount(private_key\n ).sign_transaction(trancation)\n res = w3.eth.send_raw_transaction(raw_trx.rawTransaction).hex()\n return res\n except Exception as e:\n print(e)\n print('یک اتفاقی افتاده که من نمیدونم ....')\n return 0\n\n\n_public_key = Web3.toChecksumAddress(\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3')\nprint(balance('0xAf77fB90baCE88edad8be674232C4a072BdC29A3'))\nprint(balance('0xAf77fB90baCE88edad8be674232C4a072BdC29A3'))\n_nounce = w3.eth.get_transaction_count(_public_key)\nprint(transfer('0x603c7564035A8c0a7eB9a74a76113563ffdbD36F', 0.01,\n 'a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b',\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3', _nounce))\n_nounce += 1\nprint(transfer('0x603c7564035A8c0a7eB9a74a76113563ffdbD36F', 0.01,\n 'a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b',\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3', _nounce))\n",
"<import token>\nMAIN_URL = 'https://mainnet.infura.io/v3/bb055071bba745488eda95512a6d0035'\nURL = 'https://8cf41633363c49a584fbfb0b556a5927.ropsten.rpc.rivet.cloud/'\nURL = 'wss://ropsten.infura.io/ws/v3/bb055071bba745488eda95512a6d0035'\nw3 = Web3(Web3.WebsocketProvider(URL))\n\n\ndef _checking(_addr):\n \"\"\"\n ورودی تابع ک استرینگ است که چک میشمود ایا ادرس معبتری هست یا خیر\n\n false یا addrress درنهایت\n خارح میشود\n \"\"\"\n if not isinstance(_addr, str):\n print('ادرس بد وارد کردی باید یک استرینگ باشه')\n return False\n try:\n if not w3.isConnected():\n print('نت مشکل داره ')\n return False\n addr_ = Web3.toChecksumAddress(_addr)\n if not _addr:\n print('ادرس بدی وارد کردی شرمنده تم')\n return False\n return addr_\n except Exception as e:\n print(e)\n print('یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه')\n return False\n\n\ndef balance(_addr: str) ->float:\n \"\"\"\n اینجا ادرس خواسته رو به تابع بدید\n توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10 ** 18)\n\n\ndef transfer(_to_addr: str, _value: float, private_key: str, public_key:\n str, _nounce: int):\n to_addr_ = _checking(_to_addr)\n public_key = _checking(public_key)\n if to_addr_ and public_key:\n try:\n if balance(public_key) < _value:\n print('پول ت کمه ، نمیتونی کمک کنی ')\n return False\n p = w3.eth.gas_price\n trancation = {'from': public_key, 'to': to_addr_, 'gas':\n '0x200000', 'gasPrice': p, 'nonce': _nounce, 'value': int(\n _value * 10 ** 18)}\n raw_trx = w3.eth.account.privateKeyToAccount(private_key\n ).sign_transaction(trancation)\n res = w3.eth.send_raw_transaction(raw_trx.rawTransaction).hex()\n return res\n except Exception as e:\n print(e)\n print('یک اتفاقی افتاده که من نمیدونم ....')\n return 0\n\n\n_public_key = Web3.toChecksumAddress(\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3')\nprint(balance('0xAf77fB90baCE88edad8be674232C4a072BdC29A3'))\nprint(balance('0xAf77fB90baCE88edad8be674232C4a072BdC29A3'))\n_nounce = w3.eth.get_transaction_count(_public_key)\nprint(transfer('0x603c7564035A8c0a7eB9a74a76113563ffdbD36F', 0.01,\n 'a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b',\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3', _nounce))\n_nounce += 1\nprint(transfer('0x603c7564035A8c0a7eB9a74a76113563ffdbD36F', 0.01,\n 'a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b',\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3', _nounce))\n",
"<import token>\n<assignment token>\n\n\ndef _checking(_addr):\n \"\"\"\n ورودی تابع ک استرینگ است که چک میشمود ایا ادرس معبتری هست یا خیر\n\n false یا addrress درنهایت\n خارح میشود\n \"\"\"\n if not isinstance(_addr, str):\n print('ادرس بد وارد کردی باید یک استرینگ باشه')\n return False\n try:\n if not w3.isConnected():\n print('نت مشکل داره ')\n return False\n addr_ = Web3.toChecksumAddress(_addr)\n if not _addr:\n print('ادرس بدی وارد کردی شرمنده تم')\n return False\n return addr_\n except Exception as e:\n print(e)\n print('یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه')\n return False\n\n\ndef balance(_addr: str) ->float:\n \"\"\"\n اینجا ادرس خواسته رو به تابع بدید\n توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10 ** 18)\n\n\ndef transfer(_to_addr: str, _value: float, private_key: str, public_key:\n str, _nounce: int):\n to_addr_ = _checking(_to_addr)\n public_key = _checking(public_key)\n if to_addr_ and public_key:\n try:\n if balance(public_key) < _value:\n print('پول ت کمه ، نمیتونی کمک کنی ')\n return False\n p = w3.eth.gas_price\n trancation = {'from': public_key, 'to': to_addr_, 'gas':\n '0x200000', 'gasPrice': p, 'nonce': _nounce, 'value': int(\n _value * 10 ** 18)}\n raw_trx = w3.eth.account.privateKeyToAccount(private_key\n ).sign_transaction(trancation)\n res = w3.eth.send_raw_transaction(raw_trx.rawTransaction).hex()\n return res\n except Exception as e:\n print(e)\n print('یک اتفاقی افتاده که من نمیدونم ....')\n return 0\n\n\n<assignment token>\nprint(balance('0xAf77fB90baCE88edad8be674232C4a072BdC29A3'))\nprint(balance('0xAf77fB90baCE88edad8be674232C4a072BdC29A3'))\n<assignment token>\nprint(transfer('0x603c7564035A8c0a7eB9a74a76113563ffdbD36F', 0.01,\n 'a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b',\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3', _nounce))\n_nounce += 1\nprint(transfer('0x603c7564035A8c0a7eB9a74a76113563ffdbD36F', 0.01,\n 'a49443970e8c717e218d312c0a7d1b390bea090cd3809011fd5cb926851f2e2b',\n '0xAf77fB90baCE88edad8be674232C4a072BdC29A3', _nounce))\n",
"<import token>\n<assignment token>\n\n\ndef _checking(_addr):\n \"\"\"\n ورودی تابع ک استرینگ است که چک میشمود ایا ادرس معبتری هست یا خیر\n\n false یا addrress درنهایت\n خارح میشود\n \"\"\"\n if not isinstance(_addr, str):\n print('ادرس بد وارد کردی باید یک استرینگ باشه')\n return False\n try:\n if not w3.isConnected():\n print('نت مشکل داره ')\n return False\n addr_ = Web3.toChecksumAddress(_addr)\n if not _addr:\n print('ادرس بدی وارد کردی شرمنده تم')\n return False\n return addr_\n except Exception as e:\n print(e)\n print('یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه')\n return False\n\n\ndef balance(_addr: str) ->float:\n \"\"\"\n اینجا ادرس خواسته رو به تابع بدید\n توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10 ** 18)\n\n\ndef transfer(_to_addr: str, _value: float, private_key: str, public_key:\n str, _nounce: int):\n to_addr_ = _checking(_to_addr)\n public_key = _checking(public_key)\n if to_addr_ and public_key:\n try:\n if balance(public_key) < _value:\n print('پول ت کمه ، نمیتونی کمک کنی ')\n return False\n p = w3.eth.gas_price\n trancation = {'from': public_key, 'to': to_addr_, 'gas':\n '0x200000', 'gasPrice': p, 'nonce': _nounce, 'value': int(\n _value * 10 ** 18)}\n raw_trx = w3.eth.account.privateKeyToAccount(private_key\n ).sign_transaction(trancation)\n res = w3.eth.send_raw_transaction(raw_trx.rawTransaction).hex()\n return res\n except Exception as e:\n print(e)\n print('یک اتفاقی افتاده که من نمیدونم ....')\n return 0\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef _checking(_addr):\n \"\"\"\n ورودی تابع ک استرینگ است که چک میشمود ایا ادرس معبتری هست یا خیر\n\n false یا addrress درنهایت\n خارح میشود\n \"\"\"\n if not isinstance(_addr, str):\n print('ادرس بد وارد کردی باید یک استرینگ باشه')\n return False\n try:\n if not w3.isConnected():\n print('نت مشکل داره ')\n return False\n addr_ = Web3.toChecksumAddress(_addr)\n if not _addr:\n print('ادرس بدی وارد کردی شرمنده تم')\n return False\n return addr_\n except Exception as e:\n print(e)\n print('یه مشکلی وجود داره ×ـ× مثلا نتت ضعیفه')\n return False\n\n\ndef balance(_addr: str) ->float:\n \"\"\"\n اینجا ادرس خواسته رو به تابع بدید\n توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10 ** 18)\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef balance(_addr: str) ->float:\n \"\"\"\n اینجا ادرس خواسته رو به تابع بدید\n توی خروجی یه عدد میده که همون باقیمانده ی حسابش هستش :)\n \"\"\"\n addr_ = _checking(_addr)\n return float(w3.eth.get_balance(addr_) / 10 ** 18)\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,616 |
ac08434897508cf3c616d8904ed9ab2a265fa465
|
import pandas as pd
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import pickle
# def train():
from sklearn.utils import shuffle
dataframe = pd.read_csv("../data/modelinputANN.csv") #read training data set
df = shuffle(dataframe)
#d = dataset.head()
#X = dataset.iloc[:, 0:2].values # first two cloumns are inputs for train model
# X = df.iloc[: , [1,2,3,5,6]].values
X = df.iloc[:, 0:7].values
print(X)
y = df.iloc[:, -1].values
print(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) #randomly select 20% as testing data set
sc = StandardScaler() # scale the data set
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# minmax = MinMaxScaler() # scale the data set
# X_train = minmax.fit_transform(X_train)
# X_test = minmax.transform(X_test)
classif = RandomForestClassifier(n_estimators=10, random_state=1) #train using random forest classifier
classif.fit(X_train, y_train)
y_pred = classif.predict(X_test) # predict the test data
# print(y_pred)
# print(y_test)
print('model accuracy: ',(accuracy_score(y_test, y_pred))*100)
# estimator = KerasClassifier(build_fn=classif, epochs=100, batch_size=30, verbose=0)
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
results = cross_val_score(classif, X, y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean() * 100, results.std() * 100))
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(classification_report(y_test, y_pred))
|
[
"import pandas as pd\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nimport pickle\n\n# def train():\nfrom sklearn.utils import shuffle\n\ndataframe = pd.read_csv(\"../data/modelinputANN.csv\") #read training data set\ndf = shuffle(dataframe)\n#d = dataset.head()\n#X = dataset.iloc[:, 0:2].values # first two cloumns are inputs for train model\n# X = df.iloc[: , [1,2,3,5,6]].values\nX = df.iloc[:, 0:7].values\nprint(X)\ny = df.iloc[:, -1].values\nprint(y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) #randomly select 20% as testing data set\n\nsc = StandardScaler() # scale the data set\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# minmax = MinMaxScaler() # scale the data set\n# X_train = minmax.fit_transform(X_train)\n# X_test = minmax.transform(X_test)\n\nclassif = RandomForestClassifier(n_estimators=10, random_state=1) #train using random forest classifier\nclassif.fit(X_train, y_train)\ny_pred = classif.predict(X_test) # predict the test data\n\n# print(y_pred)\n# print(y_test)\nprint('model accuracy: ',(accuracy_score(y_test, y_pred))*100)\n\n# estimator = KerasClassifier(build_fn=classif, epochs=100, batch_size=30, verbose=0)\nkfold = KFold(n_splits=10, shuffle=True, random_state=7)\nresults = cross_val_score(classif, X, y, cv=kfold)\nprint(\"Baseline: %.2f%% (%.2f%%)\" % (results.mean() * 100, results.std() * 100))\n\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\nprint(classification_report(y_test, y_pred))",
"import pandas as pd\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nimport pickle\nfrom sklearn.utils import shuffle\ndataframe = pd.read_csv('../data/modelinputANN.csv')\ndf = shuffle(dataframe)\nX = df.iloc[:, 0:7].values\nprint(X)\ny = df.iloc[:, -1].values\nprint(y)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=0)\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nclassif = RandomForestClassifier(n_estimators=10, random_state=1)\nclassif.fit(X_train, y_train)\ny_pred = classif.predict(X_test)\nprint('model accuracy: ', accuracy_score(y_test, y_pred) * 100)\nkfold = KFold(n_splits=10, shuffle=True, random_state=7)\nresults = cross_val_score(classif, X, y, cv=kfold)\nprint('Baseline: %.2f%% (%.2f%%)' % (results.mean() * 100, results.std() * 100)\n )\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\nprint(classification_report(y_test, y_pred))\n",
"<import token>\ndataframe = pd.read_csv('../data/modelinputANN.csv')\ndf = shuffle(dataframe)\nX = df.iloc[:, 0:7].values\nprint(X)\ny = df.iloc[:, -1].values\nprint(y)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=0)\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nclassif = RandomForestClassifier(n_estimators=10, random_state=1)\nclassif.fit(X_train, y_train)\ny_pred = classif.predict(X_test)\nprint('model accuracy: ', accuracy_score(y_test, y_pred) * 100)\nkfold = KFold(n_splits=10, shuffle=True, random_state=7)\nresults = cross_val_score(classif, X, y, cv=kfold)\nprint('Baseline: %.2f%% (%.2f%%)' % (results.mean() * 100, results.std() * 100)\n )\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\nprint(classification_report(y_test, y_pred))\n",
"<import token>\n<assignment token>\nprint(X)\n<assignment token>\nprint(y)\n<assignment token>\nclassif.fit(X_train, y_train)\n<assignment token>\nprint('model accuracy: ', accuracy_score(y_test, y_pred) * 100)\n<assignment token>\nprint('Baseline: %.2f%% (%.2f%%)' % (results.mean() * 100, results.std() * 100)\n )\n<assignment token>\nprint(cm)\nprint(classification_report(y_test, y_pred))\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,617 |
19dfcab012627925b4dee14b4b717402e9e5a1a1
|
from django.test import TestCase
from .core.utils import *
# Create your tests here.
class UtilsTest(TestCase):
def test_get_files_in_dir(self):
path = 'F:\Project\hawk\data'
ret = Utility.get_files_in_dir(path)
self.assertIs(ret, False)
|
[
"from django.test import TestCase\nfrom .core.utils import *\n# Create your tests here.\n\nclass UtilsTest(TestCase):\n def test_get_files_in_dir(self):\n path = 'F:\\Project\\hawk\\data'\n ret = Utility.get_files_in_dir(path)\n self.assertIs(ret, False)",
"from django.test import TestCase\nfrom .core.utils import *\n\n\nclass UtilsTest(TestCase):\n\n def test_get_files_in_dir(self):\n path = 'F:\\\\Project\\\\hawk\\\\data'\n ret = Utility.get_files_in_dir(path)\n self.assertIs(ret, False)\n",
"<import token>\n\n\nclass UtilsTest(TestCase):\n\n def test_get_files_in_dir(self):\n path = 'F:\\\\Project\\\\hawk\\\\data'\n ret = Utility.get_files_in_dir(path)\n self.assertIs(ret, False)\n",
"<import token>\n\n\nclass UtilsTest(TestCase):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,618 |
0b5a8b27ca53145e61af1dc565763a378ab8086b
|
import logging
import logging.handlers
from flask import Flask, render_template
from pymongo import MongoClient
app = Flask(__name__)
app.secret_key = 'something-secure'
# Logging
file_handler = logging.handlers.RotatingFileHandler(
'/var/log/app/app.log', mode='a+',
maxBytes=20480,
backupCount=5)
formatter = logging.Formatter("%(asctime)s - %(levelname)s: %(message)s",
"%Y-%m-%d %H:%M:%S")
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
@app.route('/')
def hello_world():
return 'Hello World!'
# When linking between containers is setup
# comment previous route and uncomment the one below.
# @app.route('/')
# def index():
# client = MongoClient('database_instance', 27017)
# db = client.test_database
# app_data = db.test_data.find()
#
# return render_template('index.html', app_data=app_data)
if __name__ == '__main__':
logging.getLogger().addHandler(logging.StreamHandler())
app.run(host='0.0.0.0')
|
[
"import logging\nimport logging.handlers\nfrom flask import Flask, render_template\nfrom pymongo import MongoClient\n\n\napp = Flask(__name__)\napp.secret_key = 'something-secure'\n\n# Logging\nfile_handler = logging.handlers.RotatingFileHandler(\n '/var/log/app/app.log', mode='a+',\n maxBytes=20480,\n backupCount=5)\n\nformatter = logging.Formatter(\"%(asctime)s - %(levelname)s: %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\nfile_handler.setFormatter(formatter)\n\napp.logger.addHandler(file_handler)\n\[email protected]('/')\ndef hello_world():\n return 'Hello World!'\n\n# When linking between containers is setup\n# comment previous route and uncomment the one below.\n# @app.route('/')\n# def index():\n# client = MongoClient('database_instance', 27017)\n# db = client.test_database\n# app_data = db.test_data.find()\n#\n# return render_template('index.html', app_data=app_data)\n\nif __name__ == '__main__':\n logging.getLogger().addHandler(logging.StreamHandler())\n app.run(host='0.0.0.0')\n",
"import logging\nimport logging.handlers\nfrom flask import Flask, render_template\nfrom pymongo import MongoClient\napp = Flask(__name__)\napp.secret_key = 'something-secure'\nfile_handler = logging.handlers.RotatingFileHandler('/var/log/app/app.log',\n mode='a+', maxBytes=20480, backupCount=5)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s',\n '%Y-%m-%d %H:%M:%S')\nfile_handler.setFormatter(formatter)\napp.logger.addHandler(file_handler)\n\n\[email protected]('/')\ndef hello_world():\n return 'Hello World!'\n\n\nif __name__ == '__main__':\n logging.getLogger().addHandler(logging.StreamHandler())\n app.run(host='0.0.0.0')\n",
"<import token>\napp = Flask(__name__)\napp.secret_key = 'something-secure'\nfile_handler = logging.handlers.RotatingFileHandler('/var/log/app/app.log',\n mode='a+', maxBytes=20480, backupCount=5)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s',\n '%Y-%m-%d %H:%M:%S')\nfile_handler.setFormatter(formatter)\napp.logger.addHandler(file_handler)\n\n\[email protected]('/')\ndef hello_world():\n return 'Hello World!'\n\n\nif __name__ == '__main__':\n logging.getLogger().addHandler(logging.StreamHandler())\n app.run(host='0.0.0.0')\n",
"<import token>\n<assignment token>\nfile_handler.setFormatter(formatter)\napp.logger.addHandler(file_handler)\n\n\[email protected]('/')\ndef hello_world():\n return 'Hello World!'\n\n\nif __name__ == '__main__':\n logging.getLogger().addHandler(logging.StreamHandler())\n app.run(host='0.0.0.0')\n",
"<import token>\n<assignment token>\n<code token>\n\n\[email protected]('/')\ndef hello_world():\n return 'Hello World!'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n"
] | false |
99,619 |
70e1a70e9e99034b04d742d0c29fcff6d0e32a54
|
# 序列化:把内存的数据保存到本地,可以做到数据持久化
# 通用,可以序列化任何数据
import pickle
my_list = [{"name": "张三", "age": "20"}, {"name": "lili", "age": "15"}]
# 得到的数据是二进制数据,想要写入文件,文件的访问模式是”wb“
file = open("mylist.txt", "wb")
pickle.dump(my_list, file)
file.close()
# 反序列化:把文件中的数据读取出来,获得一个python对象
file = open("mylist.txt", "rb")
# 反序列化的操作
my_list = pickle.load(file)
print(my_list)
file.close()
class Student(object):
def __init__(self):
self.name = "张三"
self.age = 10
stu = Student()
file = open("stu.txt", "wb")
# 序列化自定义类型的对象(序列化对象,本质上就是序列化对象属性)
pickle.dump(stu, file)
file.close()
# 反序列化
file = open("stu.txt", "rb")
stu = pickle.load(file)
print(stu.name,stu.age)
# print(stu)
file.close()
|
[
"# 序列化:把内存的数据保存到本地,可以做到数据持久化\n# 通用,可以序列化任何数据\nimport pickle\n\nmy_list = [{\"name\": \"张三\", \"age\": \"20\"}, {\"name\": \"lili\", \"age\": \"15\"}]\n# 得到的数据是二进制数据,想要写入文件,文件的访问模式是”wb“\nfile = open(\"mylist.txt\", \"wb\")\npickle.dump(my_list, file)\nfile.close()\n\n# 反序列化:把文件中的数据读取出来,获得一个python对象\nfile = open(\"mylist.txt\", \"rb\")\n# 反序列化的操作\nmy_list = pickle.load(file)\nprint(my_list)\nfile.close()\n\n\nclass Student(object):\n def __init__(self):\n self.name = \"张三\"\n self.age = 10\n\n\nstu = Student()\nfile = open(\"stu.txt\", \"wb\")\n# 序列化自定义类型的对象(序列化对象,本质上就是序列化对象属性)\npickle.dump(stu, file)\nfile.close()\n# 反序列化\nfile = open(\"stu.txt\", \"rb\")\nstu = pickle.load(file)\nprint(stu.name,stu.age)\n# print(stu)\nfile.close()\n",
"import pickle\nmy_list = [{'name': '张三', 'age': '20'}, {'name': 'lili', 'age': '15'}]\nfile = open('mylist.txt', 'wb')\npickle.dump(my_list, file)\nfile.close()\nfile = open('mylist.txt', 'rb')\nmy_list = pickle.load(file)\nprint(my_list)\nfile.close()\n\n\nclass Student(object):\n\n def __init__(self):\n self.name = '张三'\n self.age = 10\n\n\nstu = Student()\nfile = open('stu.txt', 'wb')\npickle.dump(stu, file)\nfile.close()\nfile = open('stu.txt', 'rb')\nstu = pickle.load(file)\nprint(stu.name, stu.age)\nfile.close()\n",
"<import token>\nmy_list = [{'name': '张三', 'age': '20'}, {'name': 'lili', 'age': '15'}]\nfile = open('mylist.txt', 'wb')\npickle.dump(my_list, file)\nfile.close()\nfile = open('mylist.txt', 'rb')\nmy_list = pickle.load(file)\nprint(my_list)\nfile.close()\n\n\nclass Student(object):\n\n def __init__(self):\n self.name = '张三'\n self.age = 10\n\n\nstu = Student()\nfile = open('stu.txt', 'wb')\npickle.dump(stu, file)\nfile.close()\nfile = open('stu.txt', 'rb')\nstu = pickle.load(file)\nprint(stu.name, stu.age)\nfile.close()\n",
"<import token>\n<assignment token>\npickle.dump(my_list, file)\nfile.close()\n<assignment token>\nprint(my_list)\nfile.close()\n\n\nclass Student(object):\n\n def __init__(self):\n self.name = '张三'\n self.age = 10\n\n\n<assignment token>\npickle.dump(stu, file)\nfile.close()\n<assignment token>\nprint(stu.name, stu.age)\nfile.close()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Student(object):\n\n def __init__(self):\n self.name = '张三'\n self.age = 10\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Student(object):\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,620 |
f2e3c373614886cd76f959dda25310efd78a7ff3
|
from Game2048.Main import *
from Game2048.Visual_game import *
from Game2048.AI_versus import *
def Lauch_2048():
Game = Game_2048()
Game.main()
def Lauch_2048_demo():
AI_one = Game_2048()
AI_one.demo()
def Lauch_6561():
Game = Game_6561()
Game.main()
def Lauch_2048_visual():
Visual_game()
|
[
"from Game2048.Main import *\nfrom Game2048.Visual_game import *\nfrom Game2048.AI_versus import *\n\n\n\ndef Lauch_2048():\n Game = Game_2048()\n Game.main()\n\ndef Lauch_2048_demo():\n AI_one = Game_2048()\n AI_one.demo()\n\n\ndef Lauch_6561():\n Game = Game_6561()\n Game.main()\n\ndef Lauch_2048_visual():\n Visual_game()\n\n",
"from Game2048.Main import *\nfrom Game2048.Visual_game import *\nfrom Game2048.AI_versus import *\n\n\ndef Lauch_2048():\n Game = Game_2048()\n Game.main()\n\n\ndef Lauch_2048_demo():\n AI_one = Game_2048()\n AI_one.demo()\n\n\ndef Lauch_6561():\n Game = Game_6561()\n Game.main()\n\n\ndef Lauch_2048_visual():\n Visual_game()\n",
"<import token>\n\n\ndef Lauch_2048():\n Game = Game_2048()\n Game.main()\n\n\ndef Lauch_2048_demo():\n AI_one = Game_2048()\n AI_one.demo()\n\n\ndef Lauch_6561():\n Game = Game_6561()\n Game.main()\n\n\ndef Lauch_2048_visual():\n Visual_game()\n",
"<import token>\n<function token>\n\n\ndef Lauch_2048_demo():\n AI_one = Game_2048()\n AI_one.demo()\n\n\ndef Lauch_6561():\n Game = Game_6561()\n Game.main()\n\n\ndef Lauch_2048_visual():\n Visual_game()\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Lauch_6561():\n Game = Game_6561()\n Game.main()\n\n\ndef Lauch_2048_visual():\n Visual_game()\n",
"<import token>\n<function token>\n<function token>\n\n\ndef Lauch_6561():\n Game = Game_6561()\n Game.main()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,621 |
c7b9af87abe81f76475e404c3f150fe25ae3062e
|
#!/usr/bin/env python3
key = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z', '{', '}']
flag = ["-"]*38
for keys in key:
b = bytearray(open('eaxy', 'rb').read())
for i in range(len(b)):
b[i] ^= int(hex(ord(keys)),16)
if "The XOR key you used" in str(b):
for i in str(b).split("this is the ")[1:]:
flag[int(i[0:2])] = str(keys)
print("".join(flag))
|
[
"#!/usr/bin/env python3\n\nkey = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z', '{', '}']\n\nflag = [\"-\"]*38\nfor keys in key:\n b = bytearray(open('eaxy', 'rb').read())\n for i in range(len(b)):\n b[i] ^= int(hex(ord(keys)),16)\n if \"The XOR key you used\" in str(b):\n for i in str(b).split(\"this is the \")[1:]:\n flag[int(i[0:2])] = str(keys)\nprint(\"\".join(flag))\n",
"key = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd',\n 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '}']\nflag = ['-'] * 38\nfor keys in key:\n b = bytearray(open('eaxy', 'rb').read())\n for i in range(len(b)):\n b[i] ^= int(hex(ord(keys)), 16)\n if 'The XOR key you used' in str(b):\n for i in str(b).split('this is the ')[1:]:\n flag[int(i[0:2])] = str(keys)\nprint(''.join(flag))\n",
"<assignment token>\nfor keys in key:\n b = bytearray(open('eaxy', 'rb').read())\n for i in range(len(b)):\n b[i] ^= int(hex(ord(keys)), 16)\n if 'The XOR key you used' in str(b):\n for i in str(b).split('this is the ')[1:]:\n flag[int(i[0:2])] = str(keys)\nprint(''.join(flag))\n",
"<assignment token>\n<code token>\n"
] | false |
99,622 |
9f1348ad1fb2266cad680a6be1c3118a50bddda2
|
# Extensions.
from io import BytesIO
import re
from lxml import etree
from stellated.XsltExtensions import *
from django.conf import settings
import smartypants
def wrapit(fn):
""" lxml extensions have a first dummy arg that Pyana extensions don't. Adapt.
"""
def inside(dummy, *args):
try:
return fn(*args)
except Exception as e:
print("Error in XSLT extension: %s" % e)
raise
return inside
ns = etree.FunctionNamespace('http://www.stellated.com/xuff')
ns['makeuri'] = wrapit(makeuri)
ns['now'] = wrapit(now8601)
ns['idfromtext'] = wrapit(idfromtext)
ns['lexcode'] = wrapit(lexcode)
ns['imgwidth'] = wrapit(imgwidth)
ns['imgheight'] = wrapit(imgheight)
#ns['endswith'] = wrapit(endswith)
#ns['urlquote'] = wrapit(urlquote)
#ns['phpquote'] = wrapit(phpquote)
#ns['w3cdtf'] = wrapit(w3cdtf)
#ns['slugfromtext'] = wrapit(slugfromtext)
#ns['smartypants'] = wrapit(smartypants.smartypants)
def thing_from_path(path):
from djstell.pages.models import Article, Entry
try:
thing = Article.objects.get(path=path[0])
except Article.DoesNotExist:
try:
thing = Entry.all_entries.get(path=path[0])
except Entry.DoesNotExist:
# $set_env.py: STELL_MISSING_OK - Don't complain if a pref is missing.
if os.environ.get("STELL_MISSING_OK", ""):
class Fake(object):
def __init__(self):
self.title = "MISSING PAGE"
def permaurl(self):
return "/text/missing.html"
return Fake()
else:
raise Exception(f"Couldn't find thing_from_path({path=})")
return thing
def pathtitle(path):
""" Return the title of a page at a given path.
"""
return thing_from_path(path).title
ns['pathtitle'] = wrapit(pathtitle)
def permaurl(path):
try:
return thing_from_path(path).permaurl()
except Exception as exc:
print("Couldn't get permaurl({!r}): {}".format(path, exc))
raise
ns['permaurl'] = wrapit(permaurl)
def imgvariant(path, var):
assert var == "webp"
if ":" in path:
return ""
elif path.endswith(".gif"):
# Animated gifs don't currently convert to webp, so don't try.
return ""
else:
if path.startswith(settings.BASE):
prefix = settings.BASE
path = path.removeprefix(settings.BASE)
else:
prefix = ""
path, quest, query = path.partition("?")
return f"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}"
ns['imgvariant'] = wrapit(imgvariant)
# The transform from xml to html for content.
XSLT_XFORM = None
def content_transform(name, xmltext, child=None, params={}):
#print("XSLT: %.80s(%s) %r" % (xmltext.replace('\n', ' '), child or '-', params.get('blogmode', '')))
global XSLT_XFORM
if XSLT_XFORM is None:
XSLT_XFORM = etree.XSLT(etree.parse("content.xslt"))
f = BytesIO(xmltext.encode('utf-8'))
try:
doc = etree.parse(f)
except:
print("Text was {!r}".format(xmltext))
raise
if child:
doc = doc.find(child)
params = dict(params)
params.update({
'base': string_param(settings.BASE),
})
html = str(XSLT_XFORM(doc, **params))
# smartypants doesn't handle </a>' properly.
html = re.sub(r"(</\w+>)'", r"\1’", html)
html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.Attr.n)
# <source> tags should be self-closing, but I don't know how to make XSLT
# do that for me.
html = html.replace("></source>", ">")
#print("Transformed {!r} into {!r}".format(xmltext[:80], html[:80]))
for entry in XSLT_XFORM.error_log:
if entry.filename == '<string>':
fname = name
else:
fname = entry.filename
print(f"XSLT Message: {fname} @ {entry.line}: {entry.message}")
return html
def string_param(s):
# Convert the string to an XSLT string literal. There's no escaping of characters
# in XPath string literals, so we have to break strings if they have both
# single- and double-quotes.
#
# The string:
# What's a "blog"?
# comes out as:
# "What's a "+'"'+"blog"+'"'+"?"
parts = s.split('"')
return "+'\"'+".join([ repr(str(p).encode('utf-8'))[1:] for p in parts ])
|
[
"# Extensions.\n\nfrom io import BytesIO\nimport re\n\nfrom lxml import etree\nfrom stellated.XsltExtensions import *\nfrom django.conf import settings\nimport smartypants\n\n\ndef wrapit(fn):\n \"\"\" lxml extensions have a first dummy arg that Pyana extensions don't. Adapt.\n \"\"\"\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print(\"Error in XSLT extension: %s\" % e)\n raise\n return inside\n\nns = etree.FunctionNamespace('http://www.stellated.com/xuff')\nns['makeuri'] = wrapit(makeuri)\nns['now'] = wrapit(now8601)\nns['idfromtext'] = wrapit(idfromtext)\nns['lexcode'] = wrapit(lexcode)\nns['imgwidth'] = wrapit(imgwidth)\nns['imgheight'] = wrapit(imgheight)\n\n#ns['endswith'] = wrapit(endswith)\n#ns['urlquote'] = wrapit(urlquote)\n#ns['phpquote'] = wrapit(phpquote)\n#ns['w3cdtf'] = wrapit(w3cdtf)\n#ns['slugfromtext'] = wrapit(slugfromtext)\n#ns['smartypants'] = wrapit(smartypants.smartypants)\n\ndef thing_from_path(path):\n from djstell.pages.models import Article, Entry\n try:\n thing = Article.objects.get(path=path[0])\n except Article.DoesNotExist:\n try:\n thing = Entry.all_entries.get(path=path[0])\n except Entry.DoesNotExist:\n # $set_env.py: STELL_MISSING_OK - Don't complain if a pref is missing.\n if os.environ.get(\"STELL_MISSING_OK\", \"\"):\n class Fake(object):\n def __init__(self):\n self.title = \"MISSING PAGE\"\n def permaurl(self):\n return \"/text/missing.html\"\n return Fake()\n else:\n raise Exception(f\"Couldn't find thing_from_path({path=})\")\n return thing\n\ndef pathtitle(path):\n \"\"\" Return the title of a page at a given path.\n \"\"\"\n return thing_from_path(path).title\n\nns['pathtitle'] = wrapit(pathtitle)\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\nns['permaurl'] = wrapit(permaurl)\n\ndef imgvariant(path, var):\n assert var == \"webp\"\n if \":\" in path:\n return \"\"\n elif path.endswith(\".gif\"):\n # Animated gifs don't currently convert to webp, so don't try.\n return \"\"\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = \"\"\n path, quest, query = path.partition(\"?\")\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\nns['imgvariant'] = wrapit(imgvariant)\n\n# The transform from xml to html for content.\nXSLT_XFORM = None\n\ndef content_transform(name, xmltext, child=None, params={}):\n #print(\"XSLT: %.80s(%s) %r\" % (xmltext.replace('\\n', ' '), child or '-', params.get('blogmode', '')))\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse(\"content.xslt\"))\n\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print(\"Text was {!r}\".format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({\n 'base': string_param(settings.BASE),\n })\n html = str(XSLT_XFORM(doc, **params))\n # smartypants doesn't handle </a>' properly.\n html = re.sub(r\"(</\\w+>)'\", r\"\\1’\", html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.Attr.n)\n # <source> tags should be self-closing, but I don't know how to make XSLT\n # do that for me.\n html = html.replace(\"></source>\", \">\")\n #print(\"Transformed {!r} into {!r}\".format(xmltext[:80], html[:80]))\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f\"XSLT Message: {fname} @ {entry.line}: {entry.message}\")\n return html\n\ndef string_param(s):\n # Convert the string to an XSLT string literal. There's no escaping of characters\n # in XPath string literals, so we have to break strings if they have both\n # single- and double-quotes.\n #\n # The string:\n # What's a \"blog\"?\n # comes out as:\n # \"What's a \"+'\"'+\"blog\"+'\"'+\"?\"\n parts = s.split('\"')\n return \"+'\\\"'+\".join([ repr(str(p).encode('utf-8'))[1:] for p in parts ])\n",
"from io import BytesIO\nimport re\nfrom lxml import etree\nfrom stellated.XsltExtensions import *\nfrom django.conf import settings\nimport smartypants\n\n\ndef wrapit(fn):\n \"\"\" lxml extensions have a first dummy arg that Pyana extensions don't. Adapt.\n \"\"\"\n\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print('Error in XSLT extension: %s' % e)\n raise\n return inside\n\n\nns = etree.FunctionNamespace('http://www.stellated.com/xuff')\nns['makeuri'] = wrapit(makeuri)\nns['now'] = wrapit(now8601)\nns['idfromtext'] = wrapit(idfromtext)\nns['lexcode'] = wrapit(lexcode)\nns['imgwidth'] = wrapit(imgwidth)\nns['imgheight'] = wrapit(imgheight)\n\n\ndef thing_from_path(path):\n from djstell.pages.models import Article, Entry\n try:\n thing = Article.objects.get(path=path[0])\n except Article.DoesNotExist:\n try:\n thing = Entry.all_entries.get(path=path[0])\n except Entry.DoesNotExist:\n if os.environ.get('STELL_MISSING_OK', ''):\n\n\n class Fake(object):\n\n def __init__(self):\n self.title = 'MISSING PAGE'\n\n def permaurl(self):\n return '/text/missing.html'\n return Fake()\n else:\n raise Exception(f\"Couldn't find thing_from_path(path={path!r})\"\n )\n return thing\n\n\ndef pathtitle(path):\n \"\"\" Return the title of a page at a given path.\n \"\"\"\n return thing_from_path(path).title\n\n\nns['pathtitle'] = wrapit(pathtitle)\n\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\n\nns['permaurl'] = wrapit(permaurl)\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\nns['imgvariant'] = wrapit(imgvariant)\nXSLT_XFORM = None\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\ndef string_param(s):\n parts = s.split('\"')\n return '+\\'\"\\'+'.join([repr(str(p).encode('utf-8'))[1:] for p in parts])\n",
"<import token>\n\n\ndef wrapit(fn):\n \"\"\" lxml extensions have a first dummy arg that Pyana extensions don't. Adapt.\n \"\"\"\n\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print('Error in XSLT extension: %s' % e)\n raise\n return inside\n\n\nns = etree.FunctionNamespace('http://www.stellated.com/xuff')\nns['makeuri'] = wrapit(makeuri)\nns['now'] = wrapit(now8601)\nns['idfromtext'] = wrapit(idfromtext)\nns['lexcode'] = wrapit(lexcode)\nns['imgwidth'] = wrapit(imgwidth)\nns['imgheight'] = wrapit(imgheight)\n\n\ndef thing_from_path(path):\n from djstell.pages.models import Article, Entry\n try:\n thing = Article.objects.get(path=path[0])\n except Article.DoesNotExist:\n try:\n thing = Entry.all_entries.get(path=path[0])\n except Entry.DoesNotExist:\n if os.environ.get('STELL_MISSING_OK', ''):\n\n\n class Fake(object):\n\n def __init__(self):\n self.title = 'MISSING PAGE'\n\n def permaurl(self):\n return '/text/missing.html'\n return Fake()\n else:\n raise Exception(f\"Couldn't find thing_from_path(path={path!r})\"\n )\n return thing\n\n\ndef pathtitle(path):\n \"\"\" Return the title of a page at a given path.\n \"\"\"\n return thing_from_path(path).title\n\n\nns['pathtitle'] = wrapit(pathtitle)\n\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\n\nns['permaurl'] = wrapit(permaurl)\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\nns['imgvariant'] = wrapit(imgvariant)\nXSLT_XFORM = None\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\ndef string_param(s):\n parts = s.split('\"')\n return '+\\'\"\\'+'.join([repr(str(p).encode('utf-8'))[1:] for p in parts])\n",
"<import token>\n\n\ndef wrapit(fn):\n \"\"\" lxml extensions have a first dummy arg that Pyana extensions don't. Adapt.\n \"\"\"\n\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print('Error in XSLT extension: %s' % e)\n raise\n return inside\n\n\n<assignment token>\n\n\ndef thing_from_path(path):\n from djstell.pages.models import Article, Entry\n try:\n thing = Article.objects.get(path=path[0])\n except Article.DoesNotExist:\n try:\n thing = Entry.all_entries.get(path=path[0])\n except Entry.DoesNotExist:\n if os.environ.get('STELL_MISSING_OK', ''):\n\n\n class Fake(object):\n\n def __init__(self):\n self.title = 'MISSING PAGE'\n\n def permaurl(self):\n return '/text/missing.html'\n return Fake()\n else:\n raise Exception(f\"Couldn't find thing_from_path(path={path!r})\"\n )\n return thing\n\n\ndef pathtitle(path):\n \"\"\" Return the title of a page at a given path.\n \"\"\"\n return thing_from_path(path).title\n\n\n<assignment token>\n\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\n\n<assignment token>\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\n<assignment token>\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\ndef string_param(s):\n parts = s.split('\"')\n return '+\\'\"\\'+'.join([repr(str(p).encode('utf-8'))[1:] for p in parts])\n",
"<import token>\n\n\ndef wrapit(fn):\n \"\"\" lxml extensions have a first dummy arg that Pyana extensions don't. Adapt.\n \"\"\"\n\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print('Error in XSLT extension: %s' % e)\n raise\n return inside\n\n\n<assignment token>\n\n\ndef thing_from_path(path):\n from djstell.pages.models import Article, Entry\n try:\n thing = Article.objects.get(path=path[0])\n except Article.DoesNotExist:\n try:\n thing = Entry.all_entries.get(path=path[0])\n except Entry.DoesNotExist:\n if os.environ.get('STELL_MISSING_OK', ''):\n\n\n class Fake(object):\n\n def __init__(self):\n self.title = 'MISSING PAGE'\n\n def permaurl(self):\n return '/text/missing.html'\n return Fake()\n else:\n raise Exception(f\"Couldn't find thing_from_path(path={path!r})\"\n )\n return thing\n\n\n<function token>\n<assignment token>\n\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\n\n<assignment token>\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\n<assignment token>\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\ndef string_param(s):\n parts = s.split('\"')\n return '+\\'\"\\'+'.join([repr(str(p).encode('utf-8'))[1:] for p in parts])\n",
"<import token>\n<function token>\n<assignment token>\n\n\ndef thing_from_path(path):\n from djstell.pages.models import Article, Entry\n try:\n thing = Article.objects.get(path=path[0])\n except Article.DoesNotExist:\n try:\n thing = Entry.all_entries.get(path=path[0])\n except Entry.DoesNotExist:\n if os.environ.get('STELL_MISSING_OK', ''):\n\n\n class Fake(object):\n\n def __init__(self):\n self.title = 'MISSING PAGE'\n\n def permaurl(self):\n return '/text/missing.html'\n return Fake()\n else:\n raise Exception(f\"Couldn't find thing_from_path(path={path!r})\"\n )\n return thing\n\n\n<function token>\n<assignment token>\n\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\n\n<assignment token>\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\n<assignment token>\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\ndef string_param(s):\n parts = s.split('\"')\n return '+\\'\"\\'+'.join([repr(str(p).encode('utf-8'))[1:] for p in parts])\n",
"<import token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\n\n<assignment token>\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\n<assignment token>\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\ndef string_param(s):\n parts = s.split('\"')\n return '+\\'\"\\'+'.join([repr(str(p).encode('utf-8'))[1:] for p in parts])\n",
"<import token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef permaurl(path):\n try:\n return thing_from_path(path).permaurl()\n except Exception as exc:\n print(\"Couldn't get permaurl({!r}): {}\".format(path, exc))\n raise\n\n\n<assignment token>\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\n<assignment token>\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\n<function token>\n",
"<import token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef imgvariant(path, var):\n assert var == 'webp'\n if ':' in path:\n return ''\n elif path.endswith('.gif'):\n return ''\n else:\n if path.startswith(settings.BASE):\n prefix = settings.BASE\n path = path.removeprefix(settings.BASE)\n else:\n prefix = ''\n path, quest, query = path.partition('?')\n return f\"{prefix}/iv/{var}/{path.lstrip('/')}.webp{quest}{query}\"\n\n\n<assignment token>\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\n<function token>\n",
"<import token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<assignment token>\n\n\ndef content_transform(name, xmltext, child=None, params={}):\n global XSLT_XFORM\n if XSLT_XFORM is None:\n XSLT_XFORM = etree.XSLT(etree.parse('content.xslt'))\n f = BytesIO(xmltext.encode('utf-8'))\n try:\n doc = etree.parse(f)\n except:\n print('Text was {!r}'.format(xmltext))\n raise\n if child:\n doc = doc.find(child)\n params = dict(params)\n params.update({'base': string_param(settings.BASE)})\n html = str(XSLT_XFORM(doc, **params))\n html = re.sub(\"(</\\\\w+>)'\", '\\\\1’', html)\n html = smartypants.smartypants(html, smartypants.Attr.q | smartypants.\n Attr.n)\n html = html.replace('></source>', '>')\n for entry in XSLT_XFORM.error_log:\n if entry.filename == '<string>':\n fname = name\n else:\n fname = entry.filename\n print(f'XSLT Message: {fname} @ {entry.line}: {entry.message}')\n return html\n\n\n<function token>\n",
"<import token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
99,623 |
34828d54ebe2ca7ab2c6cbdf92d123b7a71b2b83
|
def check_primes(limit):
for n in range(2, limit + 1):
check_if_prime(n)
## Check if prime number
def check_if_prime(n):
for x in range(2, n):
if n % x == 0:
print(f'{n} is equal to {x} * {n//x}')
break
else:
print(f'{n} is a prime number')
check_primes(100)
|
[
"def check_primes(limit):\n for n in range(2, limit + 1):\n check_if_prime(n)\n\n## Check if prime number\ndef check_if_prime(n):\n for x in range(2, n):\n if n % x == 0:\n print(f'{n} is equal to {x} * {n//x}')\n break\n else:\n print(f'{n} is a prime number')\n\ncheck_primes(100)\n",
"def check_primes(limit):\n for n in range(2, limit + 1):\n check_if_prime(n)\n\n\ndef check_if_prime(n):\n for x in range(2, n):\n if n % x == 0:\n print(f'{n} is equal to {x} * {n // x}')\n break\n else:\n print(f'{n} is a prime number')\n\n\ncheck_primes(100)\n",
"def check_primes(limit):\n for n in range(2, limit + 1):\n check_if_prime(n)\n\n\ndef check_if_prime(n):\n for x in range(2, n):\n if n % x == 0:\n print(f'{n} is equal to {x} * {n // x}')\n break\n else:\n print(f'{n} is a prime number')\n\n\n<code token>\n",
"def check_primes(limit):\n for n in range(2, limit + 1):\n check_if_prime(n)\n\n\n<function token>\n<code token>\n",
"<function token>\n<function token>\n<code token>\n"
] | false |
99,624 |
9038ac1b8f19585bf9e31290d39c6145bdaf3d0c
|
"""
Widgets built on top of tkinter
Part of the Minecraft Pack Manager utility (mpm)
"""
from collections.abc import Mapping
import tkinter as tk
import tkinter.ttk as ttk
class Dropdown(ttk.Combobox):
def __init__(self, master, values, *args, interactive=True, **kwargs):
state = "readonly" if not interactive else None
width = max(len(str(v)) for v in values) + 1
values = list(values)
super().__init__(
master, *args, state=state, values=values, width=width, **kwargs
)
if values:
self.set(values[0])
def set_values(self, values):
selected = self.get()
values = list(values)
self.configure(values=values, width=max(len(str(v)) for v in values) + 1)
self.set(selected if selected in values else values[0])
class MultiSelector(ttk.Treeview):
"""
Widget to select/deselect multiple element in a list, with a scrollbar
"""
def __init__(self, master, values, *args, height=5, min_height=3, **kwargs):
self.frame_ = ttk.Frame(master=master)
super().__init__(
*args,
master=self.frame_,
show="tree",
columns=[],
height=max(3, min(len(values), height)),
**kwargs
)
self.height_arg = height
self.min_height_arg = min_height
# self.column("cache", width=0, minwidth=0, stretch=False)
self.bind("<1>", self.on_click)
# Under buttons
self.button_frame = ttk.Frame(master=self.frame_)
self.button_all = ttk.Button(
master=self.button_frame, text="All", command=self.select_all
)
self.button_clear = ttk.Button(
master=self.button_frame, text="Clear", command=self.select_clear
)
self.button_toggle = ttk.Button(
master=self.button_frame, text="Toggle", command=self.select_toggle
)
self.button_frame.pack(side="bottom", fill="x")
self.button_all.pack(side="left", fill="x", expand=True)
self.button_clear.pack(side="left", fill="x", expand=True)
self.button_toggle.pack(side="left", fill="x", expand=True)
self.scrollbar_ = ttk.Scrollbar(
master=self.frame_, orient=tk.VERTICAL, command=self.yview
)
self.configure(yscrollcommand=self.scrollbar_.set)
self.scrollbar_.pack(side="right", expand=False, fill="y")
self.pack(side="left", expand=True, fill="both")
self.id_value_map = {}
self.set_values(values)
self.pack = self.frame_.pack
self.grid = self.frame_.grid
def adapt_display(self, item_number):
height = max(self.min_height_arg, min(item_number, self.height_arg))
self.config(height=height)
def set_values(self, values):
selection = set(self.get_selection())
self.select_clear()
self.delete(*self.get_children())
self.id_value_map = {
self.insert("", "end", text=str(value)): value for value in values
}
self.set_selection(selection & set(values))
self.adapt_display(len(values))
def get_selection(self):
"""
Returns the selected element from the `values` passed to `__init__()`
"""
return [
self.id_value_map[item]
for item in self.selection()
if item in self.id_value_map
]
def set_selection(self, values):
"""
Set the current selection from a subset of 'values' passed to __init__
"""
self.selection_set(
[item for item in self.get_children() if self.id_value_map[item] in values]
)
def on_click(self, event):
"""
Toggle the selection of an item that is clicked on instead of
the default behavior that is to select only that item
"""
item = self.identify("item", event.x, event.y)
if item:
if item in self.selection():
self.selection_remove(item)
else:
self.selection_add(item)
return "break"
def select_all(self):
"""
Select all items
"""
self.selection_add(*self.get_children())
def select_clear(self):
"""
Deselect all items
"""
self.selection_remove(*self.get_children())
def select_toggle(self):
"""
Toggle the selection of all items
"""
self.selection_toggle(*self.get_children())
class NestedMultiSelector(MultiSelector):
"""
Widget for multiselection with nested structures, such as a file hierarchy
"""
sep_char = "/"
undefined_value = "__UNDEFINED__"
def __init__(self, master, values, *args, height=13, min_height=7, **kwargs):
"""
Arguments
master -- parent widget
values -- nested structure to select from. Either:
- a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'
- an iterable of tuples, where an inner iterable represents a path from root to element
"""
super().__init__(
master, values, *args, height=height, min_height=min_height, **kwargs
)
self.bind("<Double-Button-1>", self.on_double_click)
self.click_job = None
def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):
flat_values = flattened or []
for key, value in mapping.items():
if value:
self._flatten_dfs(
mapping=value, prefix=prefix + (key,), flattened=flat_values
)
else:
flat_values.append(prefix + (key,))
return flat_values
def _deepen(self, flat_data):
nested_mapping = {}
for element in flat_data:
node = nested_mapping
for value in element:
node = node.setdefault(value, {})
return nested_mapping
def _rec_insert(self, mapping, prefix=tuple(), parent=""):
for key, value in mapping.items():
item = self.insert(parent=parent, index="end", text=str(key), open=True,)
if value:
self._rec_insert(value, prefix=prefix + (key,), parent=item)
else:
self.id_value_map[item] = prefix + (key,)
def set_values(self, nested_values):
selection = self.get_selection()
if self.get_children():
self.delete(*self.get_children())
self.id_value_map = {}
if isinstance(nested_values, Mapping):
self._rec_insert(nested_values)
else:
self._rec_insert(self._deepen(nested_values))
self.selection_add(
[item for item, value in self.id_value_map.items() if value in selection]
)
self.adapt_display(len(nested_values))
def get_selection(self):
# excludes nodes which are "directory" nodes and where not present leaves in the initial input
return [
self.id_value_map[item]
for item in self.selection()
if item in self.id_value_map
]
def set_selection(self, nested_values):
if isinstance(nested_values, Mapping):
nested_values = self._flatten_dfs(nested_values)
self.selection_set(
[
item
for item, value in self.id_value_map.items()
if value in nested_values
]
)
def on_click(self, event):
"""
Toggle the selection of an item that is clicked on instead of
the default behavior that is to select only that item
If the items is selected/deselected, all its childrens enter the same
selection state
"""
if self.click_job is not None:
self.after_cancel(self.click_job)
item = self.identify("item", event.x, event.y)
if item:
self.click_job = self.after(200, self.clicked, item)
return "break"
def clicked(self, item):
if item in self.selection():
self.select_clear(item)
else:
self.select_all(item)
def on_double_click(self, event):
"""
Open/Close the item
"""
if self.click_job is not None:
self.after_cancel(self.click_job)
item = self.identify("item", event.x, event.y)
if self.get_children(item):
self.item(item, open=not self.item(item, "open"))
return "break"
def select_all(self, item=""):
self.selection_add(item)
for child in self.get_children(item):
self.select_all(child)
def select_clear(self, item=""):
self.selection_remove(item)
for child in self.get_children(item):
self.select_clear(child)
def select_toggle(self, item=""):
self.selection_toggle(item)
for child in self.get_children(item):
self.select_toggle(child)
|
[
"\"\"\"\nWidgets built on top of tkinter\n\nPart of the Minecraft Pack Manager utility (mpm)\n\"\"\"\nfrom collections.abc import Mapping\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\n\nclass Dropdown(ttk.Combobox):\n def __init__(self, master, values, *args, interactive=True, **kwargs):\n state = \"readonly\" if not interactive else None\n width = max(len(str(v)) for v in values) + 1\n values = list(values)\n super().__init__(\n master, *args, state=state, values=values, width=width, **kwargs\n )\n if values:\n self.set(values[0])\n\n def set_values(self, values):\n selected = self.get()\n values = list(values)\n self.configure(values=values, width=max(len(str(v)) for v in values) + 1)\n self.set(selected if selected in values else values[0])\n\n\nclass MultiSelector(ttk.Treeview):\n \"\"\"\n Widget to select/deselect multiple element in a list, with a scrollbar\n \"\"\"\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(\n *args,\n master=self.frame_,\n show=\"tree\",\n columns=[],\n height=max(3, min(len(values), height)),\n **kwargs\n )\n self.height_arg = height\n self.min_height_arg = min_height\n # self.column(\"cache\", width=0, minwidth=0, stretch=False)\n self.bind(\"<1>\", self.on_click)\n # Under buttons\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(\n master=self.button_frame, text=\"All\", command=self.select_all\n )\n self.button_clear = ttk.Button(\n master=self.button_frame, text=\"Clear\", command=self.select_clear\n )\n self.button_toggle = ttk.Button(\n master=self.button_frame, text=\"Toggle\", command=self.select_toggle\n )\n self.button_frame.pack(side=\"bottom\", fill=\"x\")\n self.button_all.pack(side=\"left\", fill=\"x\", expand=True)\n self.button_clear.pack(side=\"left\", fill=\"x\", expand=True)\n self.button_toggle.pack(side=\"left\", fill=\"x\", expand=True)\n self.scrollbar_ = ttk.Scrollbar(\n master=self.frame_, orient=tk.VERTICAL, command=self.yview\n )\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side=\"right\", expand=False, fill=\"y\")\n self.pack(side=\"left\", expand=True, fill=\"both\")\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {\n self.insert(\"\", \"end\", text=str(value)): value for value in values\n }\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [\n self.id_value_map[item]\n for item in self.selection()\n if item in self.id_value_map\n ]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set(\n [item for item in self.get_children() if self.id_value_map[item] in values]\n )\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n \"\"\"\n item = self.identify(\"item\", event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return \"break\"\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n\n sep_char = \"/\"\n undefined_value = \"__UNDEFINED__\"\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(\n master, values, *args, height=height, min_height=min_height, **kwargs\n )\n self.bind(\"<Double-Button-1>\", self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(\n mapping=value, prefix=prefix + (key,), flattened=flat_values\n )\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=\"\"):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index=\"end\", text=str(key), open=True,)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add(\n [item for item, value in self.id_value_map.items() if value in selection]\n )\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n # excludes nodes which are \"directory\" nodes and where not present leaves in the initial input\n return [\n self.id_value_map[item]\n for item in self.selection()\n if item in self.id_value_map\n ]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set(\n [\n item\n for item, value in self.id_value_map.items()\n if value in nested_values\n ]\n )\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify(\"item\", event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return \"break\"\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify(\"item\", event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, \"open\"))\n return \"break\"\n\n def select_all(self, item=\"\"):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=\"\"):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=\"\"):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\nfrom collections.abc import Mapping\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\n\nclass Dropdown(ttk.Combobox):\n\n def __init__(self, master, values, *args, interactive=True, **kwargs):\n state = 'readonly' if not interactive else None\n width = max(len(str(v)) for v in values) + 1\n values = list(values)\n super().__init__(master, *args, state=state, values=values, width=\n width, **kwargs)\n if values:\n self.set(values[0])\n\n def set_values(self, values):\n selected = self.get()\n values = list(values)\n self.configure(values=values, width=max(len(str(v)) for v in values\n ) + 1)\n self.set(selected if selected in values else values[0])\n\n\nclass MultiSelector(ttk.Treeview):\n \"\"\"\n Widget to select/deselect multiple element in a list, with a scrollbar\n \"\"\"\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n \"\"\"\n item = self.identify('item', event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return 'break'\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n\n\nclass Dropdown(ttk.Combobox):\n\n def __init__(self, master, values, *args, interactive=True, **kwargs):\n state = 'readonly' if not interactive else None\n width = max(len(str(v)) for v in values) + 1\n values = list(values)\n super().__init__(master, *args, state=state, values=values, width=\n width, **kwargs)\n if values:\n self.set(values[0])\n\n def set_values(self, values):\n selected = self.get()\n values = list(values)\n self.configure(values=values, width=max(len(str(v)) for v in values\n ) + 1)\n self.set(selected if selected in values else values[0])\n\n\nclass MultiSelector(ttk.Treeview):\n \"\"\"\n Widget to select/deselect multiple element in a list, with a scrollbar\n \"\"\"\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n \"\"\"\n item = self.identify('item', event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return 'break'\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n\n\nclass Dropdown(ttk.Combobox):\n <function token>\n\n def set_values(self, values):\n selected = self.get()\n values = list(values)\n self.configure(values=values, width=max(len(str(v)) for v in values\n ) + 1)\n self.set(selected if selected in values else values[0])\n\n\nclass MultiSelector(ttk.Treeview):\n \"\"\"\n Widget to select/deselect multiple element in a list, with a scrollbar\n \"\"\"\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n \"\"\"\n item = self.identify('item', event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return 'break'\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n\n\nclass Dropdown(ttk.Combobox):\n <function token>\n <function token>\n\n\nclass MultiSelector(ttk.Treeview):\n \"\"\"\n Widget to select/deselect multiple element in a list, with a scrollbar\n \"\"\"\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n \"\"\"\n item = self.identify('item', event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return 'break'\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n \"\"\"\n Widget to select/deselect multiple element in a list, with a scrollbar\n \"\"\"\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n \"\"\"\n item = self.identify('item', event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return 'break'\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n \"\"\"\n item = self.identify('item', event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return 'break'\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n\n def get_selection(self):\n \"\"\"\n Returns the selected element from the `values` passed to `__init__()`\n \"\"\"\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n\n def __init__(self, master, values, *args, height=5, min_height=3, **kwargs\n ):\n self.frame_ = ttk.Frame(master=master)\n super().__init__(*args, master=self.frame_, show='tree', columns=[],\n height=max(3, min(len(values), height)), **kwargs)\n self.height_arg = height\n self.min_height_arg = min_height\n self.bind('<1>', self.on_click)\n self.button_frame = ttk.Frame(master=self.frame_)\n self.button_all = ttk.Button(master=self.button_frame, text='All',\n command=self.select_all)\n self.button_clear = ttk.Button(master=self.button_frame, text=\n 'Clear', command=self.select_clear)\n self.button_toggle = ttk.Button(master=self.button_frame, text=\n 'Toggle', command=self.select_toggle)\n self.button_frame.pack(side='bottom', fill='x')\n self.button_all.pack(side='left', fill='x', expand=True)\n self.button_clear.pack(side='left', fill='x', expand=True)\n self.button_toggle.pack(side='left', fill='x', expand=True)\n self.scrollbar_ = ttk.Scrollbar(master=self.frame_, orient=tk.\n VERTICAL, command=self.yview)\n self.configure(yscrollcommand=self.scrollbar_.set)\n self.scrollbar_.pack(side='right', expand=False, fill='y')\n self.pack(side='left', expand=True, fill='both')\n self.id_value_map = {}\n self.set_values(values)\n self.pack = self.frame_.pack\n self.grid = self.frame_.grid\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n <function token>\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n <function token>\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n <function token>\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n\n def select_toggle(self):\n \"\"\"\n Toggle the selection of all items\n \"\"\"\n self.selection_toggle(*self.get_children())\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n <function token>\n\n def adapt_display(self, item_number):\n height = max(self.min_height_arg, min(item_number, self.height_arg))\n self.config(height=height)\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n <function token>\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n <function token>\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n <function token>\n <function token>\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n <function token>\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n\n def select_all(self):\n \"\"\"\n Select all items\n \"\"\"\n self.selection_add(*self.get_children())\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n <function token>\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n <function token>\n <function token>\n\n def set_values(self, values):\n selection = set(self.get_selection())\n self.select_clear()\n self.delete(*self.get_children())\n self.id_value_map = {self.insert('', 'end', text=str(value)): value for\n value in values}\n self.set_selection(selection & set(values))\n self.adapt_display(len(values))\n <function token>\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n <function token>\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n <function token>\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n <function token>\n\n def select_clear(self):\n \"\"\"\n Deselect all items\n \"\"\"\n self.selection_remove(*self.get_children())\n <function token>\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, values):\n \"\"\"\n Set the current selection from a subset of 'values' passed to __init__\n \"\"\"\n self.selection_set([item for item in self.get_children() if self.\n id_value_map[item] in values])\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass MultiSelector(ttk.Treeview):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n \"\"\"\n Widget for multiselection with nested structures, such as a file hierarchy\n \"\"\"\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n sep_char = '/'\n undefined_value = '__UNDEFINED__'\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n\n def select_clear(self, item=''):\n self.selection_remove(item)\n for child in self.get_children(item):\n self.select_clear(child)\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n\n def _deepen(self, flat_data):\n nested_mapping = {}\n for element in flat_data:\n node = nested_mapping\n for value in element:\n node = node.setdefault(value, {})\n return nested_mapping\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n <function token>\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n\n def get_selection(self):\n return [self.id_value_map[item] for item in self.selection() if \n item in self.id_value_map]\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n <function token>\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n\n def set_values(self, nested_values):\n selection = self.get_selection()\n if self.get_children():\n self.delete(*self.get_children())\n self.id_value_map = {}\n if isinstance(nested_values, Mapping):\n self._rec_insert(nested_values)\n else:\n self._rec_insert(self._deepen(nested_values))\n self.selection_add([item for item, value in self.id_value_map.items\n () if value in selection])\n self.adapt_display(len(nested_values))\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n <function token>\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n\n def select_all(self, item=''):\n self.selection_add(item)\n for child in self.get_children(item):\n self.select_all(child)\n <function token>\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n\n def _rec_insert(self, mapping, prefix=tuple(), parent=''):\n for key, value in mapping.items():\n item = self.insert(parent=parent, index='end', text=str(key),\n open=True)\n if value:\n self._rec_insert(value, prefix=prefix + (key,), parent=item)\n else:\n self.id_value_map[item] = prefix + (key,)\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n <function token>\n <function token>\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n <function token>\n <function token>\n\n def select_toggle(self, item=''):\n self.selection_toggle(item)\n for child in self.get_children(item):\n self.select_toggle(child)\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, master, values, *args, height=13, min_height=7, **kwargs\n ):\n \"\"\"\n Arguments\n master -- parent widget\n values -- nested structure to select from. Either:\n - a nested dict, with key mapping to all sub-elements and leaves ammped to '{}'\n - an iterable of tuples, where an inner iterable represents a path from root to element\n \"\"\"\n super().__init__(master, values, *args, height=height, min_height=\n min_height, **kwargs)\n self.bind('<Double-Button-1>', self.on_double_click)\n self.click_job = None\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n\n def on_double_click(self, event):\n \"\"\"\n Open/Close the item\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if self.get_children(item):\n self.item(item, open=not self.item(item, 'open'))\n return 'break'\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n\n def on_click(self, event):\n \"\"\"\n Toggle the selection of an item that is clicked on instead of\n the default behavior that is to select only that item\n If the items is selected/deselected, all its childrens enter the same\n selection state\n \"\"\"\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify('item', event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return 'break'\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n <function token>\n\n def clicked(self, item):\n if item in self.selection():\n self.select_clear(item)\n else:\n self.select_all(item)\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _flatten_dfs(self, mapping, prefix=tuple(), flattened=None):\n flat_values = flattened or []\n for key, value in mapping.items():\n if value:\n self._flatten_dfs(mapping=value, prefix=prefix + (key,),\n flattened=flat_values)\n else:\n flat_values.append(prefix + (key,))\n return flat_values\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_selection(self, nested_values):\n if isinstance(nested_values, Mapping):\n nested_values = self._flatten_dfs(nested_values)\n self.selection_set([item for item, value in self.id_value_map.items\n () if value in nested_values])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n\n\nclass NestedMultiSelector(MultiSelector):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
99,625 |
ecb262c19c988f37ed398da7516ac4a21b85cdc5
|
# coding: utf-8
# # Ham Spam Filter for Mobile Phone Messsages
# In[2]:
# import the dataset containing messages along with their ham spam labels
import pandas as pd
df=pd.read_csv("https://cdn.rawgit.com/aviram2308/vp-hamspam/af94d24e/spam%20(1).csv", encoding="ISO-8859-1")
# In[3]:
df.head()
# In[4]:
df=df.iloc[:,0:2]
df.head()
# In[5]:
df=df.rename(columns={'v1':'Label', 'v2':'Message'})
df.head()
# In[6]:
# converting character categorical to numeric categorical
df['Label_num'] = df.Label.map({'ham':0, 'spam':1})
df.head(10)
# In[7]:
#extracting feature matrix and response vector
X=df.Message
y=df.Label_num
# In[8]:
# split X and y into training and testing sets
from sklearn import model_selection as ms
X_train, X_test, y_train, y_test = ms.train_test_split(X, y, random_state=1)
# In[9]:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# In[10]:
# import and instantiate CountVectorizer (with the default parameters)# impor
from sklearn.feature_extraction.text import CountVectorizer
# In[11]:
vect = CountVectorizer(min_df=0.04,max_df=.96)
# In[12]:
# learn the 'vocabulary' of the training data (occurs in-place)
vect.fit(X_train)
# In[13]:
# transform training data into a 'document-term matrix'
dtm_train=vect.transform(X_train)
dtm_test=vect.transform(X_test)
type(dtm_train)
# In[14]:
#Comparing different models/estimators
from sklearn import linear_model as lm
est1=lm.LogisticRegression()
est1.fit(dtm_train,y_train)
# In[15]:
# checking if a given message is ham(0) or spam(1)
dtm_3=vect.transform(['Data cleansing is hard to do, hard to maintain, hard to know where to start'])
est1.predict(dtm_3)
# In[16]:
dtm_3=vect.transform(['Hello, plz do this'])
est1.predict(dtm_3)
# In[17]:
dtm_3=vect.transform(["The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)"])
est1.predict(dtm_3)
# In[18]:
#calculating y predicted and y predicted probability for estimator 1
y_pred=est1.predict(dtm_test)
y_pred_prob=est1.predict_proba(dtm_test)
y_pred_prob.shape
# In[19]:
# calculating score for estimator 1(sc1) and roc score(scp1)
from sklearn import metrics as mt
# In[20]:
sc1=mt.accuracy_score(y_test,y_pred)
sc1
# In[21]:
scp1=mt.roc_auc_score(y_test,y_pred_prob[:,1])
scp1
# In[22]:
# import and instantiate a Multinomial Naive Bayes model
from sklearn.naive_bayes import MultinomialNB
est2= MultinomialNB()
# In[23]:
est2.fit(dtm_train,y_train)
# In[24]:
dtm_3=vect.transform(['Data cleansing is hard to do, hard to maintain, hard to know where to start'])
est2.predict(dtm_3)
# In[25]:
dtm_3=vect.transform(['Hello, plz do this'])
est2.predict(dtm_3)
# In[26]:
dtm_3=vect.transform(["The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)"])
est2.predict(dtm_3)
# In[27]:
y_pred2=est2.predict(dtm_test)
y_pred_prob2=est2.predict_proba(dtm_test)
y_pred_prob2.shape
# In[28]:
from sklearn import metrics as mt
sc2=mt.accuracy_score(y_test,y_pred2)
sc2
# In[29]:
scp2=mt.roc_auc_score(y_test,y_pred_prob2[:,1])
scp2
# In[30]:
# instantiate random forest model with default paramter
from sklearn import ensemble as eb
est3=eb.RandomForestClassifier(random_state=5)
# In[31]:
est3.fit(dtm_train,y_train)
# In[32]:
dtm_3=vect.transform(['Data cleansing is hard to do, hard to maintain, hard to know where to start'])
est3.predict(dtm_3)
# In[33]:
dtm_3=vect.transform(['Hello, plz do this'])
est3.predict(dtm_3)
# In[34]:
dtm_3=vect.transform(["The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)"])
est3.predict(dtm_3)
# In[35]:
y_pred3=est3.predict(dtm_test)
y_pred_prob3=est3.predict_proba(dtm_test)
y_pred_prob3.shape
# In[36]:
from sklearn import metrics as mt
sc3=mt.accuracy_score(y_test,y_pred3)
sc3
# In[37]:
scp3=mt.roc_auc_score(y_test,y_pred_prob3[:,1])
scp3
# In[38]:
# from calculated scores, we select random forest model
# To tune parameters of random forest model, make parameter grid(pg)
n=[5,10,20,40]
c=['gini', 'entropy']
m=[5,20,25,30]
m2=[.2,.1,.05]
# In[39]:
pg=dict(n_estimators=n, criterion=c, max_features=m)
# In[40]:
#make a score grid using random search cross validation
#grid search cross validation does exhaustive search and takes a lot of computational resource
#first score grid is made using 10 iterations, thus, sg10
sg10=ms.RandomizedSearchCV(est3,pg,cv=10,scoring='accuracy', n_iter=10,random_state=5)
# In[41]:
dtm=vect.transform(X)
sg10.fit(dtm,y)
# In[42]:
sg10.best_score_
# In[43]:
sg20=ms.RandomizedSearchCV(est3,pg,cv=10,scoring='accuracy', n_iter=20,random_state=5)
# In[44]:
sg20.fit(dtm,y)
# In[45]:
sg20.best_score_
# In[46]:
sg25=ms.RandomizedSearchCV(est3,pg,cv=10,scoring='accuracy', n_iter=25,random_state=5)
# In[47]:
sg25.fit(dtm,y)
# In[48]:
sg25.best_score_
# In[49]:
sg20.best_params_
# In[50]:
sg25.best_params_
# In[51]:
# creating a model with tuned parameters
est4=eb.RandomForestClassifier(criterion= 'entropy', max_features= 5, n_estimators= 40, random_state=5)
# In[52]:
est4.fit(dtm_train,y_train)
# In[53]:
y_pred4=est4.predict(dtm_test)
y_pred_prob4=est4.predict_proba(dtm_test)
# In[54]:
sc4=mt.accuracy_score(y_test,y_pred4)
sc4
# In[55]:
scp4=mt.roc_auc_score(y_test,y_pred_prob4[:,1])
scp4
# In[58]:
# calculating sensitivities of all models
print(mt.recall_score(y_test,y_pred))
print(mt.recall_score(y_test,y_pred2))
print(mt.recall_score(y_test,y_pred3))
print(mt.recall_score(y_test,y_pred4))
# In[59]:
# for spam detection type 1 error more important than type 2
# therefore specificity more important than sensitivity
#create confusion matrix for all models
cm=mt.confusion_matrix(y_test,y_pred)
cm2=mt.confusion_matrix(y_test,y_pred2)
cm3=mt.confusion_matrix(y_test,y_pred3)
cm4=mt.confusion_matrix(y_test,y_pred4)
# In[61]:
# calculating specificity for all models using confusion matrix of the respective model
sp1=cm[0,0]/(cm[0,0]+cm[0,1])
print(sp1)
sp2=cm2[0,0]/(cm2[0,0]+cm2[0,1])
print(sp2)
sp3=cm3[0,0]/(cm3[0,0]+cm3[0,1])
print(sp3)
sp4=cm4[0,0]/(cm4[0,0]+cm4[0,1])
print(sp4)
# In[65]:
se4=mt.recall_score(y_test,y_pred4)
# In[66]:
# choosing estimator 4 with following optimal metrics
print('Accuracy score: ',sc4)
print('ROC AUC score: ',scp4)
print('Sensitivity: ',se4)
print('Specificity: ',sp4)
|
[
"\n# coding: utf-8\n\n# # Ham Spam Filter for Mobile Phone Messsages\n\n# In[2]:\n\n\n# import the dataset containing messages along with their ham spam labels\nimport pandas as pd\ndf=pd.read_csv(\"https://cdn.rawgit.com/aviram2308/vp-hamspam/af94d24e/spam%20(1).csv\", encoding=\"ISO-8859-1\")\n\n\n# In[3]:\n\n\ndf.head()\n\n\n# In[4]:\n\n\ndf=df.iloc[:,0:2]\ndf.head()\n\n\n# In[5]:\n\n\ndf=df.rename(columns={'v1':'Label', 'v2':'Message'})\ndf.head()\n\n\n# In[6]:\n\n\n# converting character categorical to numeric categorical\ndf['Label_num'] = df.Label.map({'ham':0, 'spam':1})\ndf.head(10)\n\n\n# In[7]:\n\n\n#extracting feature matrix and response vector\nX=df.Message\ny=df.Label_num\n\n\n# In[8]:\n\n\n# split X and y into training and testing sets\nfrom sklearn import model_selection as ms\nX_train, X_test, y_train, y_test = ms.train_test_split(X, y, random_state=1)\n\n\n# In[9]:\n\n\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n\n\n# In[10]:\n\n\n# import and instantiate CountVectorizer (with the default parameters)# impor \nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\n# In[11]:\n\n\nvect = CountVectorizer(min_df=0.04,max_df=.96)\n\n\n# In[12]:\n\n\n# learn the 'vocabulary' of the training data (occurs in-place)\nvect.fit(X_train)\n\n\n# In[13]:\n\n\n# transform training data into a 'document-term matrix'\ndtm_train=vect.transform(X_train)\ndtm_test=vect.transform(X_test)\ntype(dtm_train)\n\n\n# In[14]:\n\n\n#Comparing different models/estimators\nfrom sklearn import linear_model as lm\nest1=lm.LogisticRegression()\nest1.fit(dtm_train,y_train)\n\n\n# In[15]:\n\n\n# checking if a given message is ham(0) or spam(1)\ndtm_3=vect.transform(['Data cleansing is hard to do, hard to maintain, hard to know where to start'])\nest1.predict(dtm_3)\n\n\n# In[16]:\n\n\ndtm_3=vect.transform(['Hello, plz do this'])\nest1.predict(dtm_3)\n\n\n# In[17]:\n\n\ndtm_3=vect.transform([\"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"])\nest1.predict(dtm_3)\n\n\n# In[18]:\n\n\n#calculating y predicted and y predicted probability for estimator 1\ny_pred=est1.predict(dtm_test)\ny_pred_prob=est1.predict_proba(dtm_test)\ny_pred_prob.shape\n\n\n# In[19]:\n\n\n# calculating score for estimator 1(sc1) and roc score(scp1)\nfrom sklearn import metrics as mt\n\n\n# In[20]:\n\n\nsc1=mt.accuracy_score(y_test,y_pred)\nsc1\n\n\n# In[21]:\n\n\nscp1=mt.roc_auc_score(y_test,y_pred_prob[:,1])\nscp1\n\n\n# In[22]:\n\n\n# import and instantiate a Multinomial Naive Bayes model\nfrom sklearn.naive_bayes import MultinomialNB\nest2= MultinomialNB()\n\n\n# In[23]:\n\n\nest2.fit(dtm_train,y_train)\n\n\n# In[24]:\n\n\ndtm_3=vect.transform(['Data cleansing is hard to do, hard to maintain, hard to know where to start'])\nest2.predict(dtm_3)\n\n\n# In[25]:\n\n\ndtm_3=vect.transform(['Hello, plz do this'])\nest2.predict(dtm_3)\n\n\n# In[26]:\n\n\ndtm_3=vect.transform([\"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"])\nest2.predict(dtm_3)\n\n\n# In[27]:\n\n\ny_pred2=est2.predict(dtm_test)\ny_pred_prob2=est2.predict_proba(dtm_test)\ny_pred_prob2.shape\n\n\n# In[28]:\n\n\nfrom sklearn import metrics as mt\nsc2=mt.accuracy_score(y_test,y_pred2)\nsc2\n\n\n# In[29]:\n\n\nscp2=mt.roc_auc_score(y_test,y_pred_prob2[:,1])\nscp2\n\n\n# In[30]:\n\n\n# instantiate random forest model with default paramter\nfrom sklearn import ensemble as eb\nest3=eb.RandomForestClassifier(random_state=5)\n\n\n# In[31]:\n\n\nest3.fit(dtm_train,y_train)\n\n\n# In[32]:\n\n\ndtm_3=vect.transform(['Data cleansing is hard to do, hard to maintain, hard to know where to start'])\nest3.predict(dtm_3)\n\n\n# In[33]:\n\n\ndtm_3=vect.transform(['Hello, plz do this'])\nest3.predict(dtm_3)\n\n\n# In[34]:\n\n\ndtm_3=vect.transform([\"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"])\nest3.predict(dtm_3)\n\n\n# In[35]:\n\n\ny_pred3=est3.predict(dtm_test)\ny_pred_prob3=est3.predict_proba(dtm_test)\ny_pred_prob3.shape\n\n\n# In[36]:\n\n\nfrom sklearn import metrics as mt\nsc3=mt.accuracy_score(y_test,y_pred3)\nsc3\n\n\n# In[37]:\n\n\nscp3=mt.roc_auc_score(y_test,y_pred_prob3[:,1])\nscp3\n\n\n# In[38]:\n\n\n# from calculated scores, we select random forest model\n# To tune parameters of random forest model, make parameter grid(pg) \nn=[5,10,20,40]\nc=['gini', 'entropy']\nm=[5,20,25,30]\nm2=[.2,.1,.05]\n\n\n# In[39]:\n\n\npg=dict(n_estimators=n, criterion=c, max_features=m)\n\n\n# In[40]:\n\n\n#make a score grid using random search cross validation\n#grid search cross validation does exhaustive search and takes a lot of computational resource\n#first score grid is made using 10 iterations, thus, sg10\nsg10=ms.RandomizedSearchCV(est3,pg,cv=10,scoring='accuracy', n_iter=10,random_state=5)\n\n\n# In[41]:\n\n\ndtm=vect.transform(X)\nsg10.fit(dtm,y)\n\n\n# In[42]:\n\n\nsg10.best_score_\n\n\n# In[43]:\n\n\nsg20=ms.RandomizedSearchCV(est3,pg,cv=10,scoring='accuracy', n_iter=20,random_state=5)\n\n\n# In[44]:\n\n\nsg20.fit(dtm,y)\n\n\n# In[45]:\n\n\nsg20.best_score_\n\n\n# In[46]:\n\n\nsg25=ms.RandomizedSearchCV(est3,pg,cv=10,scoring='accuracy', n_iter=25,random_state=5)\n\n\n# In[47]:\n\n\nsg25.fit(dtm,y)\n\n\n# In[48]:\n\n\nsg25.best_score_\n\n\n# In[49]:\n\n\nsg20.best_params_\n\n\n# In[50]:\n\n\nsg25.best_params_\n\n\n# In[51]:\n\n\n# creating a model with tuned parameters\nest4=eb.RandomForestClassifier(criterion= 'entropy', max_features= 5, n_estimators= 40, random_state=5)\n\n\n# In[52]:\n\n\nest4.fit(dtm_train,y_train)\n\n\n# In[53]:\n\n\ny_pred4=est4.predict(dtm_test)\ny_pred_prob4=est4.predict_proba(dtm_test)\n\n\n# In[54]:\n\n\nsc4=mt.accuracy_score(y_test,y_pred4)\nsc4\n\n\n# In[55]:\n\n\nscp4=mt.roc_auc_score(y_test,y_pred_prob4[:,1])\nscp4\n\n\n# In[58]:\n\n\n# calculating sensitivities of all models\nprint(mt.recall_score(y_test,y_pred))\nprint(mt.recall_score(y_test,y_pred2))\nprint(mt.recall_score(y_test,y_pred3))\nprint(mt.recall_score(y_test,y_pred4))\n\n\n# In[59]:\n\n\n# for spam detection type 1 error more important than type 2\n# therefore specificity more important than sensitivity\n#create confusion matrix for all models\ncm=mt.confusion_matrix(y_test,y_pred)\n\ncm2=mt.confusion_matrix(y_test,y_pred2)\n\ncm3=mt.confusion_matrix(y_test,y_pred3)\n\ncm4=mt.confusion_matrix(y_test,y_pred4)\n\n\n# In[61]:\n\n\n# calculating specificity for all models using confusion matrix of the respective model\nsp1=cm[0,0]/(cm[0,0]+cm[0,1])\nprint(sp1)\nsp2=cm2[0,0]/(cm2[0,0]+cm2[0,1])\nprint(sp2)\nsp3=cm3[0,0]/(cm3[0,0]+cm3[0,1])\nprint(sp3)\nsp4=cm4[0,0]/(cm4[0,0]+cm4[0,1])\nprint(sp4)\n\n\n\n# In[65]:\n\n\nse4=mt.recall_score(y_test,y_pred4)\n\n\n# In[66]:\n\n\n# choosing estimator 4 with following optimal metrics\nprint('Accuracy score: ',sc4)\nprint('ROC AUC score: ',scp4)\nprint('Sensitivity: ',se4)\nprint('Specificity: ',sp4)\n\n",
"import pandas as pd\ndf = pd.read_csv(\n 'https://cdn.rawgit.com/aviram2308/vp-hamspam/af94d24e/spam%20(1).csv',\n encoding='ISO-8859-1')\ndf.head()\ndf = df.iloc[:, 0:2]\ndf.head()\ndf = df.rename(columns={'v1': 'Label', 'v2': 'Message'})\ndf.head()\ndf['Label_num'] = df.Label.map({'ham': 0, 'spam': 1})\ndf.head(10)\nX = df.Message\ny = df.Label_num\nfrom sklearn import model_selection as ms\nX_train, X_test, y_train, y_test = ms.train_test_split(X, y, random_state=1)\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\nfrom sklearn.feature_extraction.text import CountVectorizer\nvect = CountVectorizer(min_df=0.04, max_df=0.96)\nvect.fit(X_train)\ndtm_train = vect.transform(X_train)\ndtm_test = vect.transform(X_test)\ntype(dtm_train)\nfrom sklearn import linear_model as lm\nest1 = lm.LogisticRegression()\nest1.fit(dtm_train, y_train)\ndtm_3 = vect.transform([\n 'Data cleansing is hard to do, hard to maintain, hard to know where to start'\n ])\nest1.predict(dtm_3)\ndtm_3 = vect.transform(['Hello, plz do this'])\nest1.predict(dtm_3)\ndtm_3 = vect.transform([\n \"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"\n ])\nest1.predict(dtm_3)\ny_pred = est1.predict(dtm_test)\ny_pred_prob = est1.predict_proba(dtm_test)\ny_pred_prob.shape\nfrom sklearn import metrics as mt\nsc1 = mt.accuracy_score(y_test, y_pred)\nsc1\nscp1 = mt.roc_auc_score(y_test, y_pred_prob[:, 1])\nscp1\nfrom sklearn.naive_bayes import MultinomialNB\nest2 = MultinomialNB()\nest2.fit(dtm_train, y_train)\ndtm_3 = vect.transform([\n 'Data cleansing is hard to do, hard to maintain, hard to know where to start'\n ])\nest2.predict(dtm_3)\ndtm_3 = vect.transform(['Hello, plz do this'])\nest2.predict(dtm_3)\ndtm_3 = vect.transform([\n \"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"\n ])\nest2.predict(dtm_3)\ny_pred2 = est2.predict(dtm_test)\ny_pred_prob2 = est2.predict_proba(dtm_test)\ny_pred_prob2.shape\nfrom sklearn import metrics as mt\nsc2 = mt.accuracy_score(y_test, y_pred2)\nsc2\nscp2 = mt.roc_auc_score(y_test, y_pred_prob2[:, 1])\nscp2\nfrom sklearn import ensemble as eb\nest3 = eb.RandomForestClassifier(random_state=5)\nest3.fit(dtm_train, y_train)\ndtm_3 = vect.transform([\n 'Data cleansing is hard to do, hard to maintain, hard to know where to start'\n ])\nest3.predict(dtm_3)\ndtm_3 = vect.transform(['Hello, plz do this'])\nest3.predict(dtm_3)\ndtm_3 = vect.transform([\n \"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"\n ])\nest3.predict(dtm_3)\ny_pred3 = est3.predict(dtm_test)\ny_pred_prob3 = est3.predict_proba(dtm_test)\ny_pred_prob3.shape\nfrom sklearn import metrics as mt\nsc3 = mt.accuracy_score(y_test, y_pred3)\nsc3\nscp3 = mt.roc_auc_score(y_test, y_pred_prob3[:, 1])\nscp3\nn = [5, 10, 20, 40]\nc = ['gini', 'entropy']\nm = [5, 20, 25, 30]\nm2 = [0.2, 0.1, 0.05]\npg = dict(n_estimators=n, criterion=c, max_features=m)\nsg10 = ms.RandomizedSearchCV(est3, pg, cv=10, scoring='accuracy', n_iter=10,\n random_state=5)\ndtm = vect.transform(X)\nsg10.fit(dtm, y)\nsg10.best_score_\nsg20 = ms.RandomizedSearchCV(est3, pg, cv=10, scoring='accuracy', n_iter=20,\n random_state=5)\nsg20.fit(dtm, y)\nsg20.best_score_\nsg25 = ms.RandomizedSearchCV(est3, pg, cv=10, scoring='accuracy', n_iter=25,\n random_state=5)\nsg25.fit(dtm, y)\nsg25.best_score_\nsg20.best_params_\nsg25.best_params_\nest4 = eb.RandomForestClassifier(criterion='entropy', max_features=5,\n n_estimators=40, random_state=5)\nest4.fit(dtm_train, y_train)\ny_pred4 = est4.predict(dtm_test)\ny_pred_prob4 = est4.predict_proba(dtm_test)\nsc4 = mt.accuracy_score(y_test, y_pred4)\nsc4\nscp4 = mt.roc_auc_score(y_test, y_pred_prob4[:, 1])\nscp4\nprint(mt.recall_score(y_test, y_pred))\nprint(mt.recall_score(y_test, y_pred2))\nprint(mt.recall_score(y_test, y_pred3))\nprint(mt.recall_score(y_test, y_pred4))\ncm = mt.confusion_matrix(y_test, y_pred)\ncm2 = mt.confusion_matrix(y_test, y_pred2)\ncm3 = mt.confusion_matrix(y_test, y_pred3)\ncm4 = mt.confusion_matrix(y_test, y_pred4)\nsp1 = cm[0, 0] / (cm[0, 0] + cm[0, 1])\nprint(sp1)\nsp2 = cm2[0, 0] / (cm2[0, 0] + cm2[0, 1])\nprint(sp2)\nsp3 = cm3[0, 0] / (cm3[0, 0] + cm3[0, 1])\nprint(sp3)\nsp4 = cm4[0, 0] / (cm4[0, 0] + cm4[0, 1])\nprint(sp4)\nse4 = mt.recall_score(y_test, y_pred4)\nprint('Accuracy score: ', sc4)\nprint('ROC AUC score: ', scp4)\nprint('Sensitivity: ', se4)\nprint('Specificity: ', sp4)\n",
"<import token>\ndf = pd.read_csv(\n 'https://cdn.rawgit.com/aviram2308/vp-hamspam/af94d24e/spam%20(1).csv',\n encoding='ISO-8859-1')\ndf.head()\ndf = df.iloc[:, 0:2]\ndf.head()\ndf = df.rename(columns={'v1': 'Label', 'v2': 'Message'})\ndf.head()\ndf['Label_num'] = df.Label.map({'ham': 0, 'spam': 1})\ndf.head(10)\nX = df.Message\ny = df.Label_num\n<import token>\nX_train, X_test, y_train, y_test = ms.train_test_split(X, y, random_state=1)\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n<import token>\nvect = CountVectorizer(min_df=0.04, max_df=0.96)\nvect.fit(X_train)\ndtm_train = vect.transform(X_train)\ndtm_test = vect.transform(X_test)\ntype(dtm_train)\n<import token>\nest1 = lm.LogisticRegression()\nest1.fit(dtm_train, y_train)\ndtm_3 = vect.transform([\n 'Data cleansing is hard to do, hard to maintain, hard to know where to start'\n ])\nest1.predict(dtm_3)\ndtm_3 = vect.transform(['Hello, plz do this'])\nest1.predict(dtm_3)\ndtm_3 = vect.transform([\n \"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"\n ])\nest1.predict(dtm_3)\ny_pred = est1.predict(dtm_test)\ny_pred_prob = est1.predict_proba(dtm_test)\ny_pred_prob.shape\n<import token>\nsc1 = mt.accuracy_score(y_test, y_pred)\nsc1\nscp1 = mt.roc_auc_score(y_test, y_pred_prob[:, 1])\nscp1\n<import token>\nest2 = MultinomialNB()\nest2.fit(dtm_train, y_train)\ndtm_3 = vect.transform([\n 'Data cleansing is hard to do, hard to maintain, hard to know where to start'\n ])\nest2.predict(dtm_3)\ndtm_3 = vect.transform(['Hello, plz do this'])\nest2.predict(dtm_3)\ndtm_3 = vect.transform([\n \"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"\n ])\nest2.predict(dtm_3)\ny_pred2 = est2.predict(dtm_test)\ny_pred_prob2 = est2.predict_proba(dtm_test)\ny_pred_prob2.shape\n<import token>\nsc2 = mt.accuracy_score(y_test, y_pred2)\nsc2\nscp2 = mt.roc_auc_score(y_test, y_pred_prob2[:, 1])\nscp2\n<import token>\nest3 = eb.RandomForestClassifier(random_state=5)\nest3.fit(dtm_train, y_train)\ndtm_3 = vect.transform([\n 'Data cleansing is hard to do, hard to maintain, hard to know where to start'\n ])\nest3.predict(dtm_3)\ndtm_3 = vect.transform(['Hello, plz do this'])\nest3.predict(dtm_3)\ndtm_3 = vect.transform([\n \"The Facebook Team wish to inform you that you are one of the Lucky winners from your Region (PAKISTAN) in this year's Online Promotion and your Facebook account has won the sum of EIGHT HUNDRED THOUSAND GREAT BRITISH POUNDS (£800,000 GBP)\"\n ])\nest3.predict(dtm_3)\ny_pred3 = est3.predict(dtm_test)\ny_pred_prob3 = est3.predict_proba(dtm_test)\ny_pred_prob3.shape\n<import token>\nsc3 = mt.accuracy_score(y_test, y_pred3)\nsc3\nscp3 = mt.roc_auc_score(y_test, y_pred_prob3[:, 1])\nscp3\nn = [5, 10, 20, 40]\nc = ['gini', 'entropy']\nm = [5, 20, 25, 30]\nm2 = [0.2, 0.1, 0.05]\npg = dict(n_estimators=n, criterion=c, max_features=m)\nsg10 = ms.RandomizedSearchCV(est3, pg, cv=10, scoring='accuracy', n_iter=10,\n random_state=5)\ndtm = vect.transform(X)\nsg10.fit(dtm, y)\nsg10.best_score_\nsg20 = ms.RandomizedSearchCV(est3, pg, cv=10, scoring='accuracy', n_iter=20,\n random_state=5)\nsg20.fit(dtm, y)\nsg20.best_score_\nsg25 = ms.RandomizedSearchCV(est3, pg, cv=10, scoring='accuracy', n_iter=25,\n random_state=5)\nsg25.fit(dtm, y)\nsg25.best_score_\nsg20.best_params_\nsg25.best_params_\nest4 = eb.RandomForestClassifier(criterion='entropy', max_features=5,\n n_estimators=40, random_state=5)\nest4.fit(dtm_train, y_train)\ny_pred4 = est4.predict(dtm_test)\ny_pred_prob4 = est4.predict_proba(dtm_test)\nsc4 = mt.accuracy_score(y_test, y_pred4)\nsc4\nscp4 = mt.roc_auc_score(y_test, y_pred_prob4[:, 1])\nscp4\nprint(mt.recall_score(y_test, y_pred))\nprint(mt.recall_score(y_test, y_pred2))\nprint(mt.recall_score(y_test, y_pred3))\nprint(mt.recall_score(y_test, y_pred4))\ncm = mt.confusion_matrix(y_test, y_pred)\ncm2 = mt.confusion_matrix(y_test, y_pred2)\ncm3 = mt.confusion_matrix(y_test, y_pred3)\ncm4 = mt.confusion_matrix(y_test, y_pred4)\nsp1 = cm[0, 0] / (cm[0, 0] + cm[0, 1])\nprint(sp1)\nsp2 = cm2[0, 0] / (cm2[0, 0] + cm2[0, 1])\nprint(sp2)\nsp3 = cm3[0, 0] / (cm3[0, 0] + cm3[0, 1])\nprint(sp3)\nsp4 = cm4[0, 0] / (cm4[0, 0] + cm4[0, 1])\nprint(sp4)\nse4 = mt.recall_score(y_test, y_pred4)\nprint('Accuracy score: ', sc4)\nprint('ROC AUC score: ', scp4)\nprint('Sensitivity: ', se4)\nprint('Specificity: ', sp4)\n",
"<import token>\n<assignment token>\ndf.head()\n<assignment token>\ndf.head()\n<assignment token>\ndf.head()\n<assignment token>\ndf.head(10)\n<assignment token>\n<import token>\n<assignment token>\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n<import token>\n<assignment token>\nvect.fit(X_train)\n<assignment token>\ntype(dtm_train)\n<import token>\n<assignment token>\nest1.fit(dtm_train, y_train)\n<assignment token>\nest1.predict(dtm_3)\n<assignment token>\nest1.predict(dtm_3)\n<assignment token>\nest1.predict(dtm_3)\n<assignment token>\ny_pred_prob.shape\n<import token>\n<assignment token>\nsc1\n<assignment token>\nscp1\n<import token>\n<assignment token>\nest2.fit(dtm_train, y_train)\n<assignment token>\nest2.predict(dtm_3)\n<assignment token>\nest2.predict(dtm_3)\n<assignment token>\nest2.predict(dtm_3)\n<assignment token>\ny_pred_prob2.shape\n<import token>\n<assignment token>\nsc2\n<assignment token>\nscp2\n<import token>\n<assignment token>\nest3.fit(dtm_train, y_train)\n<assignment token>\nest3.predict(dtm_3)\n<assignment token>\nest3.predict(dtm_3)\n<assignment token>\nest3.predict(dtm_3)\n<assignment token>\ny_pred_prob3.shape\n<import token>\n<assignment token>\nsc3\n<assignment token>\nscp3\n<assignment token>\nsg10.fit(dtm, y)\nsg10.best_score_\n<assignment token>\nsg20.fit(dtm, y)\nsg20.best_score_\n<assignment token>\nsg25.fit(dtm, y)\nsg25.best_score_\nsg20.best_params_\nsg25.best_params_\n<assignment token>\nest4.fit(dtm_train, y_train)\n<assignment token>\nsc4\n<assignment token>\nscp4\nprint(mt.recall_score(y_test, y_pred))\nprint(mt.recall_score(y_test, y_pred2))\nprint(mt.recall_score(y_test, y_pred3))\nprint(mt.recall_score(y_test, y_pred4))\n<assignment token>\nprint(sp1)\n<assignment token>\nprint(sp2)\n<assignment token>\nprint(sp3)\n<assignment token>\nprint(sp4)\n<assignment token>\nprint('Accuracy score: ', sc4)\nprint('ROC AUC score: ', scp4)\nprint('Sensitivity: ', se4)\nprint('Specificity: ', sp4)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,626 |
9e5e38a897053bdfd7abf359aa1eb67b5f81996a
|
from typing import Any
_FIELDS = "__container_fields__"
_ID_FIELD_NAME = "__container_id_field_name__"
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields_dict = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
return fields_dict
def id_field(class_or_instance):
try:
field_name = getattr(class_or_instance, _ID_FIELD_NAME)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
return fields(class_or_instance)[field_name]
def has_id_field(class_or_instance: Any) -> bool:
"""Check if class/instance has an identity attribute.
Args:
class_or_instance (Any): Domain Element to check.
Returns:
bool: True if the element has an identity field.
"""
return hasattr(class_or_instance, _ID_FIELD_NAME)
def has_fields(class_or_instance):
"""Check if Protean element encloses fields"""
return hasattr(class_or_instance, _FIELDS)
def attributes(class_or_instance):
attributes_dict = {}
for _, field_obj in fields(class_or_instance).items():
# FIXME Make these checks elegant
# Because of circular import issues, `Reference` class cannot be imported
# in this file. So we are resorting to check for method presence in
# field objects. Not the most elegant way, but will have to suffice
# until class heirarchies are restructured.
if hasattr(field_obj, "get_shadow_fields"):
shadow_fields = field_obj.get_shadow_fields()
for _, shadow_field in shadow_fields:
attributes_dict[shadow_field.attribute_name] = shadow_field
elif hasattr(field_obj, "relation"):
attributes_dict[field_obj.get_attribute_name()] = field_obj.relation
elif not hasattr(field_obj, "to_cls"):
attributes_dict[field_obj.get_attribute_name()] = field_obj
else: # This field is an association. Ignore recording it as an attribute
pass
return attributes_dict
def unique_fields(class_or_instance):
"""Return the unique fields for this class or instance"""
return {
field_name: field_obj
for field_name, field_obj in attributes(class_or_instance).items()
if field_obj.unique
}
def declared_fields(class_or_instance):
"""Return a tuple describing the declared fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
`_version` is a auto-controlled, internal field, so is not returned
among declared fields.
"""
# Might it be worth caching this, per class?
try:
fields_dict = dict(getattr(class_or_instance, _FIELDS))
fields_dict.pop("_version", None)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
return fields_dict
|
[
"from typing import Any\n\n_FIELDS = \"__container_fields__\"\n_ID_FIELD_NAME = \"__container_id_field_name__\"\n\n\ndef fields(class_or_instance):\n \"\"\"Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n \"\"\"\n\n # Might it be worth caching this, per class?\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError(\"must be called with a dataclass type or instance\")\n\n return fields_dict\n\n\ndef id_field(class_or_instance):\n try:\n field_name = getattr(class_or_instance, _ID_FIELD_NAME)\n except AttributeError:\n raise TypeError(\"must be called with a dataclass type or instance\")\n\n return fields(class_or_instance)[field_name]\n\n\ndef has_id_field(class_or_instance: Any) -> bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\ndef has_fields(class_or_instance):\n \"\"\"Check if Protean element encloses fields\"\"\"\n return hasattr(class_or_instance, _FIELDS)\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n\n for _, field_obj in fields(class_or_instance).items():\n # FIXME Make these checks elegant\n # Because of circular import issues, `Reference` class cannot be imported\n # in this file. So we are resorting to check for method presence in\n # field objects. Not the most elegant way, but will have to suffice\n # until class heirarchies are restructured.\n if hasattr(field_obj, \"get_shadow_fields\"):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, \"relation\"):\n attributes_dict[field_obj.get_attribute_name()] = field_obj.relation\n elif not hasattr(field_obj, \"to_cls\"):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else: # This field is an association. Ignore recording it as an attribute\n pass\n\n return attributes_dict\n\n\ndef unique_fields(class_or_instance):\n \"\"\"Return the unique fields for this class or instance\"\"\"\n return {\n field_name: field_obj\n for field_name, field_obj in attributes(class_or_instance).items()\n if field_obj.unique\n }\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n\n # Might it be worth caching this, per class?\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop(\"_version\", None)\n except AttributeError:\n raise TypeError(\"must be called with a dataclass type or instance\")\n\n return fields_dict\n",
"from typing import Any\n_FIELDS = '__container_fields__'\n_ID_FIELD_NAME = '__container_id_field_name__'\n\n\ndef fields(class_or_instance):\n \"\"\"Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n \"\"\"\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n\n\ndef id_field(class_or_instance):\n try:\n field_name = getattr(class_or_instance, _ID_FIELD_NAME)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields(class_or_instance)[field_name]\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\ndef has_fields(class_or_instance):\n \"\"\"Check if Protean element encloses fields\"\"\"\n return hasattr(class_or_instance, _FIELDS)\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\ndef unique_fields(class_or_instance):\n \"\"\"Return the unique fields for this class or instance\"\"\"\n return {field_name: field_obj for field_name, field_obj in attributes(\n class_or_instance).items() if field_obj.unique}\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop('_version', None)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n",
"<import token>\n_FIELDS = '__container_fields__'\n_ID_FIELD_NAME = '__container_id_field_name__'\n\n\ndef fields(class_or_instance):\n \"\"\"Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n \"\"\"\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n\n\ndef id_field(class_or_instance):\n try:\n field_name = getattr(class_or_instance, _ID_FIELD_NAME)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields(class_or_instance)[field_name]\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\ndef has_fields(class_or_instance):\n \"\"\"Check if Protean element encloses fields\"\"\"\n return hasattr(class_or_instance, _FIELDS)\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\ndef unique_fields(class_or_instance):\n \"\"\"Return the unique fields for this class or instance\"\"\"\n return {field_name: field_obj for field_name, field_obj in attributes(\n class_or_instance).items() if field_obj.unique}\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop('_version', None)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n",
"<import token>\n<assignment token>\n\n\ndef fields(class_or_instance):\n \"\"\"Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n \"\"\"\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n\n\ndef id_field(class_or_instance):\n try:\n field_name = getattr(class_or_instance, _ID_FIELD_NAME)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields(class_or_instance)[field_name]\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\ndef has_fields(class_or_instance):\n \"\"\"Check if Protean element encloses fields\"\"\"\n return hasattr(class_or_instance, _FIELDS)\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\ndef unique_fields(class_or_instance):\n \"\"\"Return the unique fields for this class or instance\"\"\"\n return {field_name: field_obj for field_name, field_obj in attributes(\n class_or_instance).items() if field_obj.unique}\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop('_version', None)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n",
"<import token>\n<assignment token>\n\n\ndef fields(class_or_instance):\n \"\"\"Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n \"\"\"\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n\n\ndef id_field(class_or_instance):\n try:\n field_name = getattr(class_or_instance, _ID_FIELD_NAME)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields(class_or_instance)[field_name]\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\n<function token>\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\ndef unique_fields(class_or_instance):\n \"\"\"Return the unique fields for this class or instance\"\"\"\n return {field_name: field_obj for field_name, field_obj in attributes(\n class_or_instance).items() if field_obj.unique}\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop('_version', None)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n",
"<import token>\n<assignment token>\n\n\ndef fields(class_or_instance):\n \"\"\"Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n \"\"\"\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n\n\ndef id_field(class_or_instance):\n try:\n field_name = getattr(class_or_instance, _ID_FIELD_NAME)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields(class_or_instance)[field_name]\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\n<function token>\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\n<function token>\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop('_version', None)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef id_field(class_or_instance):\n try:\n field_name = getattr(class_or_instance, _ID_FIELD_NAME)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields(class_or_instance)[field_name]\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\n<function token>\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\n<function token>\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop('_version', None)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\n<function token>\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\n<function token>\n\n\ndef declared_fields(class_or_instance):\n \"\"\"Return a tuple describing the declared fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n\n `_version` is a auto-controlled, internal field, so is not returned\n among declared fields.\n \"\"\"\n try:\n fields_dict = dict(getattr(class_or_instance, _FIELDS))\n fields_dict.pop('_version', None)\n except AttributeError:\n raise TypeError('must be called with a dataclass type or instance')\n return fields_dict\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef has_id_field(class_or_instance: Any) ->bool:\n \"\"\"Check if class/instance has an identity attribute.\n\n Args:\n class_or_instance (Any): Domain Element to check.\n\n Returns:\n bool: True if the element has an identity field.\n \"\"\"\n return hasattr(class_or_instance, _ID_FIELD_NAME)\n\n\n<function token>\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef attributes(class_or_instance):\n attributes_dict = {}\n for _, field_obj in fields(class_or_instance).items():\n if hasattr(field_obj, 'get_shadow_fields'):\n shadow_fields = field_obj.get_shadow_fields()\n for _, shadow_field in shadow_fields:\n attributes_dict[shadow_field.attribute_name] = shadow_field\n elif hasattr(field_obj, 'relation'):\n attributes_dict[field_obj.get_attribute_name()\n ] = field_obj.relation\n elif not hasattr(field_obj, 'to_cls'):\n attributes_dict[field_obj.get_attribute_name()] = field_obj\n else:\n pass\n return attributes_dict\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,627 |
2663656561063f6b160d82d1f7aea23dd2ae2382
|
# Generated by Django 3.2 on 2021-04-14 16:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_auto_20210413_1755'),
('orders', '0010_auto_20210414_1639'),
]
operations = [
]
|
[
"# Generated by Django 3.2 on 2021-04-14 16:52\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0006_auto_20210413_1755'),\n ('orders', '0010_auto_20210414_1639'),\n ]\n\n operations = [\n ]\n",
"from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('orders', '0006_auto_20210413_1755'), ('orders',\n '0010_auto_20210414_1639')]\n operations = []\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('orders', '0006_auto_20210413_1755'), ('orders',\n '0010_auto_20210414_1639')]\n operations = []\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
99,628 |
609ceffc97ea194b0f1ea987f33772607d24988c
|
CASS_CONFIG = {
"username": "username",
"password": "password",
"cluster": ["127.0.0.1"],
"keyspace": "projects",
"protocol_version": 2,
"cache": "newton_orders"
}
MONGO_CONFIG = {
"username": "username",
"password": "password",
"host": "127.0.0.1",
"port": 27017,
"db": "newton",
"collection": "orders",
"enable_auth": False
}
|
[
"CASS_CONFIG = {\n\t\"username\": \"username\",\n\t\"password\": \"password\",\n\t\"cluster\": [\"127.0.0.1\"],\n\t\"keyspace\": \"projects\",\n\t\"protocol_version\": 2,\n\t\"cache\": \"newton_orders\"\n}\nMONGO_CONFIG = {\n\t\"username\": \"username\",\n\t\"password\": \"password\",\n\t\"host\": \"127.0.0.1\",\n\t\"port\": 27017,\n\t\"db\": \"newton\",\n\t\"collection\": \"orders\",\n\t\"enable_auth\": False\n}",
"CASS_CONFIG = {'username': 'username', 'password': 'password', 'cluster': [\n '127.0.0.1'], 'keyspace': 'projects', 'protocol_version': 2, 'cache':\n 'newton_orders'}\nMONGO_CONFIG = {'username': 'username', 'password': 'password', 'host':\n '127.0.0.1', 'port': 27017, 'db': 'newton', 'collection': 'orders',\n 'enable_auth': False}\n",
"<assignment token>\n"
] | false |
99,629 |
4a982179bf38c8855966f78afc9c41abda886443
|
"""
PyElant main module.
@brief PyElant
@author Paulo Marcos
@date 2021-03-19
Copyright (c) 2021 paulomarcosdj <@> outlook.com
"""
import argparse
from .pyelant import PyElant
def main():
parser = argparse.ArgumentParser(description='PyElant: insert input and output languages.')
parser.add_argument('-i', '--input_language', type=str,
help='Input language to be translated. Example: "en", "jp", "fr".')
parser.add_argument('-o', '--output_language', type=str,
help='Output language desired. Example: "jp", "fr", "en".')
parser.add_argument('-t', '--text', type=str,
help='Text to be translated. Example "Hello world."')
parser.add_argument('-dn', '--disable_notification', action="store_true",
help='Disable system notification displaying result of the translations.')
parser.add_argument('-v', '--verbose', action="store_true",
help='Verbose mode')
args = parser.parse_args()
PyElant(args.input_language,
args.output_language,
args.text,
args.disable_notification,
args.verbose)
if __name__ == "__main__":
main()
|
[
"\"\"\"\nPyElant main module.\n\n @brief PyElant\n @author Paulo Marcos\n @date 2021-03-19\n Copyright (c) 2021 paulomarcosdj <@> outlook.com\n\"\"\"\n\nimport argparse\nfrom .pyelant import PyElant\n\ndef main():\n parser = argparse.ArgumentParser(description='PyElant: insert input and output languages.')\n parser.add_argument('-i', '--input_language', type=str,\n help='Input language to be translated. Example: \"en\", \"jp\", \"fr\".')\n parser.add_argument('-o', '--output_language', type=str,\n help='Output language desired. Example: \"jp\", \"fr\", \"en\".')\n parser.add_argument('-t', '--text', type=str,\n help='Text to be translated. Example \"Hello world.\"')\n parser.add_argument('-dn', '--disable_notification', action=\"store_true\",\n help='Disable system notification displaying result of the translations.')\n parser.add_argument('-v', '--verbose', action=\"store_true\",\n help='Verbose mode')\n\n args = parser.parse_args()\n PyElant(args.input_language,\n args.output_language,\n args.text,\n args.disable_notification,\n args.verbose)\n\n\nif __name__ == \"__main__\":\n main()",
"<docstring token>\nimport argparse\nfrom .pyelant import PyElant\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\n 'PyElant: insert input and output languages.')\n parser.add_argument('-i', '--input_language', type=str, help=\n 'Input language to be translated. Example: \"en\", \"jp\", \"fr\".')\n parser.add_argument('-o', '--output_language', type=str, help=\n 'Output language desired. Example: \"jp\", \"fr\", \"en\".')\n parser.add_argument('-t', '--text', type=str, help=\n 'Text to be translated. Example \"Hello world.\"')\n parser.add_argument('-dn', '--disable_notification', action=\n 'store_true', help=\n 'Disable system notification displaying result of the translations.')\n parser.add_argument('-v', '--verbose', action='store_true', help=\n 'Verbose mode')\n args = parser.parse_args()\n PyElant(args.input_language, args.output_language, args.text, args.\n disable_notification, args.verbose)\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\n 'PyElant: insert input and output languages.')\n parser.add_argument('-i', '--input_language', type=str, help=\n 'Input language to be translated. Example: \"en\", \"jp\", \"fr\".')\n parser.add_argument('-o', '--output_language', type=str, help=\n 'Output language desired. Example: \"jp\", \"fr\", \"en\".')\n parser.add_argument('-t', '--text', type=str, help=\n 'Text to be translated. Example \"Hello world.\"')\n parser.add_argument('-dn', '--disable_notification', action=\n 'store_true', help=\n 'Disable system notification displaying result of the translations.')\n parser.add_argument('-v', '--verbose', action='store_true', help=\n 'Verbose mode')\n args = parser.parse_args()\n PyElant(args.input_language, args.output_language, args.text, args.\n disable_notification, args.verbose)\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\n 'PyElant: insert input and output languages.')\n parser.add_argument('-i', '--input_language', type=str, help=\n 'Input language to be translated. Example: \"en\", \"jp\", \"fr\".')\n parser.add_argument('-o', '--output_language', type=str, help=\n 'Output language desired. Example: \"jp\", \"fr\", \"en\".')\n parser.add_argument('-t', '--text', type=str, help=\n 'Text to be translated. Example \"Hello world.\"')\n parser.add_argument('-dn', '--disable_notification', action=\n 'store_true', help=\n 'Disable system notification displaying result of the translations.')\n parser.add_argument('-v', '--verbose', action='store_true', help=\n 'Verbose mode')\n args = parser.parse_args()\n PyElant(args.input_language, args.output_language, args.text, args.\n disable_notification, args.verbose)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<code token>\n"
] | false |
99,630 |
177841cae417da85deb4bc14bd89ddd2893c49da
|
def add(num1, num2):
return num1+num2
def subtract(num1, num2):
return num1 - num2
def multiply(num1, num2):
return num1 * num2
def divide(num1, num2):
float_num1 = float(num1)
float_num2 = float(num2)
return float_num1 / float_num2
def square(num1):
return num1 * num1
def cube(num1):
return num1 * num1 * num1
def power(num1, num2):
return num1**num2
def mod(num1, num2):
return num1 % num2
|
[
"def add(num1, num2):\n return num1+num2\n\ndef subtract(num1, num2):\n return num1 - num2\n\ndef multiply(num1, num2):\n return num1 * num2\n\ndef divide(num1, num2):\n float_num1 = float(num1)\n float_num2 = float(num2)\n return float_num1 / float_num2\n\ndef square(num1):\n return num1 * num1\n\ndef cube(num1):\n return num1 * num1 * num1\n\ndef power(num1, num2):\n return num1**num2\n\ndef mod(num1, num2):\n return num1 % num2\n",
"def add(num1, num2):\n return num1 + num2\n\n\ndef subtract(num1, num2):\n return num1 - num2\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\ndef divide(num1, num2):\n float_num1 = float(num1)\n float_num2 = float(num2)\n return float_num1 / float_num2\n\n\ndef square(num1):\n return num1 * num1\n\n\ndef cube(num1):\n return num1 * num1 * num1\n\n\ndef power(num1, num2):\n return num1 ** num2\n\n\ndef mod(num1, num2):\n return num1 % num2\n",
"def add(num1, num2):\n return num1 + num2\n\n\ndef subtract(num1, num2):\n return num1 - num2\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\n<function token>\n\n\ndef square(num1):\n return num1 * num1\n\n\ndef cube(num1):\n return num1 * num1 * num1\n\n\ndef power(num1, num2):\n return num1 ** num2\n\n\ndef mod(num1, num2):\n return num1 % num2\n",
"def add(num1, num2):\n return num1 + num2\n\n\ndef subtract(num1, num2):\n return num1 - num2\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\n<function token>\n\n\ndef square(num1):\n return num1 * num1\n\n\ndef cube(num1):\n return num1 * num1 * num1\n\n\n<function token>\n\n\ndef mod(num1, num2):\n return num1 % num2\n",
"<function token>\n\n\ndef subtract(num1, num2):\n return num1 - num2\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\n<function token>\n\n\ndef square(num1):\n return num1 * num1\n\n\ndef cube(num1):\n return num1 * num1 * num1\n\n\n<function token>\n\n\ndef mod(num1, num2):\n return num1 % num2\n",
"<function token>\n\n\ndef subtract(num1, num2):\n return num1 - num2\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\n<function token>\n\n\ndef square(num1):\n return num1 * num1\n\n\n<function token>\n<function token>\n\n\ndef mod(num1, num2):\n return num1 % num2\n",
"<function token>\n\n\ndef subtract(num1, num2):\n return num1 - num2\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\n<function token>\n\n\ndef square(num1):\n return num1 * num1\n\n\n<function token>\n<function token>\n<function token>\n",
"<function token>\n\n\ndef subtract(num1, num2):\n return num1 - num2\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<function token>\n<function token>\n\n\ndef multiply(num1, num2):\n return num1 * num2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,631 |
261b67d5c83d3bc397e5e8ebcfaae160d8f7996c
|
from rest_framework.permissions import BasePermission
import api.models as models
class ItisMeorAdminPermission(BasePermission):
# Premission to control
def has_object_permission(self, request, view, obj):
admin = bool(request.user and request.user.is_staff)
if admin:
return True
if request.user == obj:
return True
return False
class MyScheduleorAdminPermission(BasePermission):
def has_object_permission(self, request, view, obj):
# return False
admin = bool(request.user and request.user.is_staff)
if admin:
return True
if obj.owner == request.user:
return True
return False
class SchedulePermission(BasePermission):
def has_object_permission(self, request, view, obj):
# return False
admin = bool(request.user and request.user.is_staff)
if obj.owner == request.user:
return True
if admin:
return True
if obj.permission["general"]=="public":
return True
if request.user.username in obj.permission['exceptions']:
return True
return True
class MySchedulePermission(BasePermission):
pass
class ActivityisinSchedulePermission(BasePermission):
def has_object_permission(self, request, view, activity,schedule):
return schedule.activities.filter(activity=activity)
|
[
"from rest_framework.permissions import BasePermission\nimport api.models as models\n\n\nclass ItisMeorAdminPermission(BasePermission):\n # Premission to control \n def has_object_permission(self, request, view, obj): \n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if request.user == obj:\n return True\n return False \n\nclass MyScheduleorAdminPermission(BasePermission): \n def has_object_permission(self, request, view, obj):\n # return False\n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if obj.owner == request.user:\n return True\n return False \nclass SchedulePermission(BasePermission):\n def has_object_permission(self, request, view, obj):\n # return False\n admin = bool(request.user and request.user.is_staff)\n if obj.owner == request.user:\n return True\n if admin:\n return True \n if obj.permission[\"general\"]==\"public\":\n return True\n if request.user.username in obj.permission['exceptions']:\n return True \n return True \nclass MySchedulePermission(BasePermission):\n pass\n\nclass ActivityisinSchedulePermission(BasePermission):\n def has_object_permission(self, request, view, activity,schedule): \n return schedule.activities.filter(activity=activity)\n ",
"from rest_framework.permissions import BasePermission\nimport api.models as models\n\n\nclass ItisMeorAdminPermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if request.user == obj:\n return True\n return False\n\n\nclass MyScheduleorAdminPermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if obj.owner == request.user:\n return True\n return False\n\n\nclass SchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if obj.owner == request.user:\n return True\n if admin:\n return True\n if obj.permission['general'] == 'public':\n return True\n if request.user.username in obj.permission['exceptions']:\n return True\n return True\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n\n\nclass ItisMeorAdminPermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if request.user == obj:\n return True\n return False\n\n\nclass MyScheduleorAdminPermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if obj.owner == request.user:\n return True\n return False\n\n\nclass SchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if obj.owner == request.user:\n return True\n if admin:\n return True\n if obj.permission['general'] == 'public':\n return True\n if request.user.username in obj.permission['exceptions']:\n return True\n return True\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n\n\nclass ItisMeorAdminPermission(BasePermission):\n <function token>\n\n\nclass MyScheduleorAdminPermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if obj.owner == request.user:\n return True\n return False\n\n\nclass SchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if obj.owner == request.user:\n return True\n if admin:\n return True\n if obj.permission['general'] == 'public':\n return True\n if request.user.username in obj.permission['exceptions']:\n return True\n return True\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n<class token>\n\n\nclass MyScheduleorAdminPermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if admin:\n return True\n if obj.owner == request.user:\n return True\n return False\n\n\nclass SchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if obj.owner == request.user:\n return True\n if admin:\n return True\n if obj.permission['general'] == 'public':\n return True\n if request.user.username in obj.permission['exceptions']:\n return True\n return True\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n<class token>\n\n\nclass MyScheduleorAdminPermission(BasePermission):\n <function token>\n\n\nclass SchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if obj.owner == request.user:\n return True\n if admin:\n return True\n if obj.permission['general'] == 'public':\n return True\n if request.user.username in obj.permission['exceptions']:\n return True\n return True\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass SchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, obj):\n admin = bool(request.user and request.user.is_staff)\n if obj.owner == request.user:\n return True\n if admin:\n return True\n if obj.permission['general'] == 'public':\n return True\n if request.user.username in obj.permission['exceptions']:\n return True\n return True\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass SchedulePermission(BasePermission):\n <function token>\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass MySchedulePermission(BasePermission):\n pass\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n\n def has_object_permission(self, request, view, activity, schedule):\n return schedule.activities.filter(activity=activity)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ActivityisinSchedulePermission(BasePermission):\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
99,632 |
bf013c78ee404650529a8609daf839a79737712c
|
#!/Users/weixinping/PycharmProjects/fuxi/py/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"#!/Users/weixinping/PycharmProjects/fuxi/py/bin/python\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n",
"from django.core import management\nif __name__ == '__main__':\n management.execute_from_command_line()\n",
"<import token>\nif __name__ == '__main__':\n management.execute_from_command_line()\n",
"<import token>\n<code token>\n"
] | false |
99,633 |
c49a43b4c9f51d55579c478dd2e307301c527854
|
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
# Create your models here.
class Todo(models.Model):
title = models.CharField(max_length=200)
text = models.TextField()
created_at = models.DateTimeField(default=datetime.now,blank=True)
def __str__(self): #django adminde olstrdugmuz todolarn tdo object olarak titlelaryla goruntulernmelrni saglayacak.
return self.title
|
[
"from __future__ import unicode_literals\nfrom datetime import datetime\nfrom django.db import models\n\n# Create your models here.\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n text = models.TextField()\n created_at = models.DateTimeField(default=datetime.now,blank=True)\n\n def __str__(self): #django adminde olstrdugmuz todolarn tdo object olarak titlelaryla goruntulernmelrni saglayacak.\n return self.title",
"from __future__ import unicode_literals\nfrom datetime import datetime\nfrom django.db import models\n\n\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n text = models.TextField()\n created_at = models.DateTimeField(default=datetime.now, blank=True)\n\n def __str__(self):\n return self.title\n",
"<import token>\n\n\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n text = models.TextField()\n created_at = models.DateTimeField(default=datetime.now, blank=True)\n\n def __str__(self):\n return self.title\n",
"<import token>\n\n\nclass Todo(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.title\n",
"<import token>\n\n\nclass Todo(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,634 |
0e4cc4b4bf4483566122da4d15f3827270180658
|
def quchong(dirpath):
a = 0
readDir = dirpath + "\\node_type.txt" # old
writeDir = dirpath + "\\node_type_mapings.txt" # new
# txtDir = "/home/Administrator/Desktop/1"
lines_seen = set()
outfile = open(writeDir, "w")
f = open(readDir, "r")
for line in f:
if line not in lines_seen:
a += 1
outfile.write(line)
lines_seen.add(line)
# print(a)
# print('\n')
outfile.close()
print("success")
def train_testdata():
delnum = 30#删除少于delnum 个签到的用户
a = 0.8 #80%作为训练集
data = '.\\data\\data\\checkin.txt'
train = '.\\data\\data\\train.txt'
test = '.\\data\\data\\test.txt'
#打开原文件
f = open(data, 'r', encoding='UTF-8', errors='ignore')
line = f.readline()
fa = open(train, 'w')
fb = open(test, 'w')
fa.write(line) #写列名
fb.write(line)
# 统计user_poi dict
user_poilist = dict()
while line:
line = f.readline()
toks = line.strip().split("\t")
if len(toks) == 4: #17
u, tl, p, c = toks[0], toks[1], toks[2], toks[3]
# u, t, l, p, c = toks[0], toks[8], toks[9], toks[13], toks[16]
# if u is not None and t is not None and l is not None and p is not None and c is not None:
if u not in user_poilist:
user_poilist[u] = []
user_poilist[u].append(str(line))
f.close()
# #地点少于delnum的删除
# for user in list(user_poilist):
# if len(user_poilist[user]) <= delnum:
# user_poilist.pop(user)
#写入train&test,并且每一项不为空
for user in user_poilist:
num = round(a*len(user_poilist[user])) #训练集 每个用户的签到数量, 四舍五入
l = user_poilist[user]
for i in range(0,num):
fa.write(str(l[i]))
for i in range(num,len(user_poilist[user])):
fb.write(str(l[i]))
fa.close()
fb.close()
def xieleibie(dirpath):
f = open(dirpath + '\\random_walks.txt','r', encoding='UTF-8', errors='ignore')
line = f.readline() # 调用文件的 readline()方法
with open(dirpath + '\\node_type.txt', 'w') as fb:
while line:
list = line.strip().split(" ")
for i in range(0,len(list)):
if list[i].startswith('U'):
fb.write(list[i] + " user\n")
#print(list[i]+" user")
elif list[i].startswith('p'):
fb.write(list[i] + " poi\n")
#print(list[i]+" poi")
elif list[i].startswith('t'):
fb.write(list[i] + " time\n")
#print(list[i] + " t")
elif list[i].startswith('l'):
fb.write(list[i] + " loc\n")
# print(list[i] + " l")
elif list[i].startswith('c'):
fb.write(list[i] + " category\n")
# print(list[i] + " category")
line = f.readline()
f.close()
#分离序列中的user和poi
def separatewalks(dirpath):
f = open(dirpath + '\\random_walks.txt', 'r', encoding='UTF-8', errors='ignore')
line = f.readline() # 调用文件的 readline()方法
fp = open(dirpath + '\\poi_walks.txt', 'w')
fu = open(dirpath + '\\user_walks.txt', 'w')
while line:
list = line.strip().split(" ")
for i in range(0, len(list)):
if list[i].startswith('u'):
fu.write(list[i] + " ")
if list[i].startswith('p'):
fp.write(list[i] + " ")
fu.write("\n")
fp.write("\n")
line = f.readline()
f.close()
#训练集测试集划分
# train_testdata()
# ucu = ".\\data\\ucu\\vector"
# xieleibie(ucu)
# quchong(ucu)
# utlp= ".\\data\\utlp\\vector"
# # separatewalks(utlp)
# xieleibie(utlp)
# quchong(utlp)
utlp2= ".\\data\\utlp2\\vector"
xieleibie(utlp2)
quchong(utlp2)
|
[
"\ndef quchong(dirpath):\n a = 0\n readDir = dirpath + \"\\\\node_type.txt\" # old\n writeDir = dirpath + \"\\\\node_type_mapings.txt\" # new\n # txtDir = \"/home/Administrator/Desktop/1\"\n lines_seen = set()\n outfile = open(writeDir, \"w\")\n f = open(readDir, \"r\")\n for line in f:\n if line not in lines_seen:\n a += 1\n outfile.write(line)\n lines_seen.add(line)\n # print(a)\n # print('\\n')\n outfile.close()\n print(\"success\")\n\ndef train_testdata():\n delnum = 30#删除少于delnum 个签到的用户\n a = 0.8 #80%作为训练集\n data = '.\\\\data\\\\data\\\\checkin.txt'\n train = '.\\\\data\\\\data\\\\train.txt'\n test = '.\\\\data\\\\data\\\\test.txt'\n\n #打开原文件\n f = open(data, 'r', encoding='UTF-8', errors='ignore')\n line = f.readline()\n\n fa = open(train, 'w')\n fb = open(test, 'w')\n fa.write(line) #写列名\n fb.write(line)\n\n # 统计user_poi dict\n user_poilist = dict()\n while line:\n line = f.readline()\n toks = line.strip().split(\"\\t\")\n if len(toks) == 4: #17\n u, tl, p, c = toks[0], toks[1], toks[2], toks[3]\n # u, t, l, p, c = toks[0], toks[8], toks[9], toks[13], toks[16]\n # if u is not None and t is not None and l is not None and p is not None and c is not None:\n if u not in user_poilist:\n user_poilist[u] = []\n user_poilist[u].append(str(line))\n f.close()\n\n # #地点少于delnum的删除\n # for user in list(user_poilist):\n # if len(user_poilist[user]) <= delnum:\n # user_poilist.pop(user)\n\n #写入train&test,并且每一项不为空\n for user in user_poilist:\n num = round(a*len(user_poilist[user])) #训练集 每个用户的签到数量, 四舍五入\n l = user_poilist[user]\n for i in range(0,num):\n fa.write(str(l[i]))\n\n for i in range(num,len(user_poilist[user])):\n fb.write(str(l[i]))\n\n fa.close()\n fb.close()\n\ndef xieleibie(dirpath):\n f = open(dirpath + '\\\\random_walks.txt','r', encoding='UTF-8', errors='ignore')\n line = f.readline() \t\t # 调用文件的 readline()方法\n with open(dirpath + '\\\\node_type.txt', 'w') as fb:\n while line:\n list = line.strip().split(\" \")\n for i in range(0,len(list)):\n if list[i].startswith('U'):\n fb.write(list[i] + \" user\\n\")\n #print(list[i]+\" user\")\n elif list[i].startswith('p'):\n fb.write(list[i] + \" poi\\n\")\n #print(list[i]+\" poi\")\n elif list[i].startswith('t'):\n fb.write(list[i] + \" time\\n\")\n #print(list[i] + \" t\")\n elif list[i].startswith('l'):\n fb.write(list[i] + \" loc\\n\")\n # print(list[i] + \" l\")\n elif list[i].startswith('c'):\n fb.write(list[i] + \" category\\n\")\n # print(list[i] + \" category\")\n line = f.readline()\n f.close()\n\n#分离序列中的user和poi\ndef separatewalks(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors='ignore')\n line = f.readline() # 调用文件的 readline()方法\n fp = open(dirpath + '\\\\poi_walks.txt', 'w')\n fu = open(dirpath + '\\\\user_walks.txt', 'w')\n while line:\n list = line.strip().split(\" \")\n for i in range(0, len(list)):\n if list[i].startswith('u'):\n fu.write(list[i] + \" \")\n if list[i].startswith('p'):\n fp.write(list[i] + \" \")\n fu.write(\"\\n\")\n fp.write(\"\\n\")\n line = f.readline()\n f.close()\n\n#训练集测试集划分\n# train_testdata()\n\n# ucu = \".\\\\data\\\\ucu\\\\vector\"\n# xieleibie(ucu)\n# quchong(ucu)\n\n# utlp= \".\\\\data\\\\utlp\\\\vector\"\n# # separatewalks(utlp)\n# xieleibie(utlp)\n# quchong(utlp)\n\nutlp2= \".\\\\data\\\\utlp2\\\\vector\"\nxieleibie(utlp2)\nquchong(utlp2)\n",
"def quchong(dirpath):\n a = 0\n readDir = dirpath + '\\\\node_type.txt'\n writeDir = dirpath + '\\\\node_type_mapings.txt'\n lines_seen = set()\n outfile = open(writeDir, 'w')\n f = open(readDir, 'r')\n for line in f:\n if line not in lines_seen:\n a += 1\n outfile.write(line)\n lines_seen.add(line)\n outfile.close()\n print('success')\n\n\ndef train_testdata():\n delnum = 30\n a = 0.8\n data = '.\\\\data\\\\data\\\\checkin.txt'\n train = '.\\\\data\\\\data\\\\train.txt'\n test = '.\\\\data\\\\data\\\\test.txt'\n f = open(data, 'r', encoding='UTF-8', errors='ignore')\n line = f.readline()\n fa = open(train, 'w')\n fb = open(test, 'w')\n fa.write(line)\n fb.write(line)\n user_poilist = dict()\n while line:\n line = f.readline()\n toks = line.strip().split('\\t')\n if len(toks) == 4:\n u, tl, p, c = toks[0], toks[1], toks[2], toks[3]\n if u not in user_poilist:\n user_poilist[u] = []\n user_poilist[u].append(str(line))\n f.close()\n for user in user_poilist:\n num = round(a * len(user_poilist[user]))\n l = user_poilist[user]\n for i in range(0, num):\n fa.write(str(l[i]))\n for i in range(num, len(user_poilist[user])):\n fb.write(str(l[i]))\n fa.close()\n fb.close()\n\n\ndef xieleibie(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n with open(dirpath + '\\\\node_type.txt', 'w') as fb:\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('U'):\n fb.write(list[i] + ' user\\n')\n elif list[i].startswith('p'):\n fb.write(list[i] + ' poi\\n')\n elif list[i].startswith('t'):\n fb.write(list[i] + ' time\\n')\n elif list[i].startswith('l'):\n fb.write(list[i] + ' loc\\n')\n elif list[i].startswith('c'):\n fb.write(list[i] + ' category\\n')\n line = f.readline()\n f.close()\n\n\ndef separatewalks(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n fp = open(dirpath + '\\\\poi_walks.txt', 'w')\n fu = open(dirpath + '\\\\user_walks.txt', 'w')\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('u'):\n fu.write(list[i] + ' ')\n if list[i].startswith('p'):\n fp.write(list[i] + ' ')\n fu.write('\\n')\n fp.write('\\n')\n line = f.readline()\n f.close()\n\n\nutlp2 = '.\\\\data\\\\utlp2\\\\vector'\nxieleibie(utlp2)\nquchong(utlp2)\n",
"def quchong(dirpath):\n a = 0\n readDir = dirpath + '\\\\node_type.txt'\n writeDir = dirpath + '\\\\node_type_mapings.txt'\n lines_seen = set()\n outfile = open(writeDir, 'w')\n f = open(readDir, 'r')\n for line in f:\n if line not in lines_seen:\n a += 1\n outfile.write(line)\n lines_seen.add(line)\n outfile.close()\n print('success')\n\n\ndef train_testdata():\n delnum = 30\n a = 0.8\n data = '.\\\\data\\\\data\\\\checkin.txt'\n train = '.\\\\data\\\\data\\\\train.txt'\n test = '.\\\\data\\\\data\\\\test.txt'\n f = open(data, 'r', encoding='UTF-8', errors='ignore')\n line = f.readline()\n fa = open(train, 'w')\n fb = open(test, 'w')\n fa.write(line)\n fb.write(line)\n user_poilist = dict()\n while line:\n line = f.readline()\n toks = line.strip().split('\\t')\n if len(toks) == 4:\n u, tl, p, c = toks[0], toks[1], toks[2], toks[3]\n if u not in user_poilist:\n user_poilist[u] = []\n user_poilist[u].append(str(line))\n f.close()\n for user in user_poilist:\n num = round(a * len(user_poilist[user]))\n l = user_poilist[user]\n for i in range(0, num):\n fa.write(str(l[i]))\n for i in range(num, len(user_poilist[user])):\n fb.write(str(l[i]))\n fa.close()\n fb.close()\n\n\ndef xieleibie(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n with open(dirpath + '\\\\node_type.txt', 'w') as fb:\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('U'):\n fb.write(list[i] + ' user\\n')\n elif list[i].startswith('p'):\n fb.write(list[i] + ' poi\\n')\n elif list[i].startswith('t'):\n fb.write(list[i] + ' time\\n')\n elif list[i].startswith('l'):\n fb.write(list[i] + ' loc\\n')\n elif list[i].startswith('c'):\n fb.write(list[i] + ' category\\n')\n line = f.readline()\n f.close()\n\n\ndef separatewalks(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n fp = open(dirpath + '\\\\poi_walks.txt', 'w')\n fu = open(dirpath + '\\\\user_walks.txt', 'w')\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('u'):\n fu.write(list[i] + ' ')\n if list[i].startswith('p'):\n fp.write(list[i] + ' ')\n fu.write('\\n')\n fp.write('\\n')\n line = f.readline()\n f.close()\n\n\n<assignment token>\nxieleibie(utlp2)\nquchong(utlp2)\n",
"def quchong(dirpath):\n a = 0\n readDir = dirpath + '\\\\node_type.txt'\n writeDir = dirpath + '\\\\node_type_mapings.txt'\n lines_seen = set()\n outfile = open(writeDir, 'w')\n f = open(readDir, 'r')\n for line in f:\n if line not in lines_seen:\n a += 1\n outfile.write(line)\n lines_seen.add(line)\n outfile.close()\n print('success')\n\n\ndef train_testdata():\n delnum = 30\n a = 0.8\n data = '.\\\\data\\\\data\\\\checkin.txt'\n train = '.\\\\data\\\\data\\\\train.txt'\n test = '.\\\\data\\\\data\\\\test.txt'\n f = open(data, 'r', encoding='UTF-8', errors='ignore')\n line = f.readline()\n fa = open(train, 'w')\n fb = open(test, 'w')\n fa.write(line)\n fb.write(line)\n user_poilist = dict()\n while line:\n line = f.readline()\n toks = line.strip().split('\\t')\n if len(toks) == 4:\n u, tl, p, c = toks[0], toks[1], toks[2], toks[3]\n if u not in user_poilist:\n user_poilist[u] = []\n user_poilist[u].append(str(line))\n f.close()\n for user in user_poilist:\n num = round(a * len(user_poilist[user]))\n l = user_poilist[user]\n for i in range(0, num):\n fa.write(str(l[i]))\n for i in range(num, len(user_poilist[user])):\n fb.write(str(l[i]))\n fa.close()\n fb.close()\n\n\ndef xieleibie(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n with open(dirpath + '\\\\node_type.txt', 'w') as fb:\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('U'):\n fb.write(list[i] + ' user\\n')\n elif list[i].startswith('p'):\n fb.write(list[i] + ' poi\\n')\n elif list[i].startswith('t'):\n fb.write(list[i] + ' time\\n')\n elif list[i].startswith('l'):\n fb.write(list[i] + ' loc\\n')\n elif list[i].startswith('c'):\n fb.write(list[i] + ' category\\n')\n line = f.readline()\n f.close()\n\n\ndef separatewalks(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n fp = open(dirpath + '\\\\poi_walks.txt', 'w')\n fu = open(dirpath + '\\\\user_walks.txt', 'w')\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('u'):\n fu.write(list[i] + ' ')\n if list[i].startswith('p'):\n fp.write(list[i] + ' ')\n fu.write('\\n')\n fp.write('\\n')\n line = f.readline()\n f.close()\n\n\n<assignment token>\n<code token>\n",
"def quchong(dirpath):\n a = 0\n readDir = dirpath + '\\\\node_type.txt'\n writeDir = dirpath + '\\\\node_type_mapings.txt'\n lines_seen = set()\n outfile = open(writeDir, 'w')\n f = open(readDir, 'r')\n for line in f:\n if line not in lines_seen:\n a += 1\n outfile.write(line)\n lines_seen.add(line)\n outfile.close()\n print('success')\n\n\n<function token>\n\n\ndef xieleibie(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n with open(dirpath + '\\\\node_type.txt', 'w') as fb:\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('U'):\n fb.write(list[i] + ' user\\n')\n elif list[i].startswith('p'):\n fb.write(list[i] + ' poi\\n')\n elif list[i].startswith('t'):\n fb.write(list[i] + ' time\\n')\n elif list[i].startswith('l'):\n fb.write(list[i] + ' loc\\n')\n elif list[i].startswith('c'):\n fb.write(list[i] + ' category\\n')\n line = f.readline()\n f.close()\n\n\ndef separatewalks(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n fp = open(dirpath + '\\\\poi_walks.txt', 'w')\n fu = open(dirpath + '\\\\user_walks.txt', 'w')\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('u'):\n fu.write(list[i] + ' ')\n if list[i].startswith('p'):\n fp.write(list[i] + ' ')\n fu.write('\\n')\n fp.write('\\n')\n line = f.readline()\n f.close()\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n\n\ndef xieleibie(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n with open(dirpath + '\\\\node_type.txt', 'w') as fb:\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('U'):\n fb.write(list[i] + ' user\\n')\n elif list[i].startswith('p'):\n fb.write(list[i] + ' poi\\n')\n elif list[i].startswith('t'):\n fb.write(list[i] + ' time\\n')\n elif list[i].startswith('l'):\n fb.write(list[i] + ' loc\\n')\n elif list[i].startswith('c'):\n fb.write(list[i] + ' category\\n')\n line = f.readline()\n f.close()\n\n\ndef separatewalks(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n fp = open(dirpath + '\\\\poi_walks.txt', 'w')\n fu = open(dirpath + '\\\\user_walks.txt', 'w')\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('u'):\n fu.write(list[i] + ' ')\n if list[i].startswith('p'):\n fp.write(list[i] + ' ')\n fu.write('\\n')\n fp.write('\\n')\n line = f.readline()\n f.close()\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n\n\ndef xieleibie(dirpath):\n f = open(dirpath + '\\\\random_walks.txt', 'r', encoding='UTF-8', errors=\n 'ignore')\n line = f.readline()\n with open(dirpath + '\\\\node_type.txt', 'w') as fb:\n while line:\n list = line.strip().split(' ')\n for i in range(0, len(list)):\n if list[i].startswith('U'):\n fb.write(list[i] + ' user\\n')\n elif list[i].startswith('p'):\n fb.write(list[i] + ' poi\\n')\n elif list[i].startswith('t'):\n fb.write(list[i] + ' time\\n')\n elif list[i].startswith('l'):\n fb.write(list[i] + ' loc\\n')\n elif list[i].startswith('c'):\n fb.write(list[i] + ' category\\n')\n line = f.readline()\n f.close()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,635 |
dee74255aeeb9662f505362702cbabdc0ff3dc80
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # Matlab-style plotting
import seaborn as sns
import warnings
from scipy import stats
from scipy.stats import norm, skew #for some statistics
from subprocess import check_output
color = sns.color_palette()
sns.set_style('darkgrid')
############ Build Environment ############
############################################
def ignore_warn(*args, **kwargs):
pass
warnings.warn = ignore_warn # ignore annoying warning (from sklearn and seaborn)
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points
# print(check_output(["ls", "../data"]).decode("utf8")) # check the files available in the directory
############################################
#Now let's import and put the train and test datasets in pandas dataframe
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
##display the first five rows of the train dataset.
# print(train.head(5))
##display the first five rows of the test dataset.
# print(test.head(5))
############ Inro ############
############################################
#check the numbers of samples and features
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
print('\n\n')
print("The train ID :\n {} ".format(np.array(train_ID)))
print("The test ID :\n {} ".format(np.array(test_ID)))
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
#check again the data size after dropping the 'Id' variable
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDART
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data ,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy fuction log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidth = 1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the area of each street connected to the house property most likely have a similar area to other houses in its neighborhood ,
# we can fill in missing values by the median LotFrontage of the neighborhood.
# Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero for having no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA .
# Since the house with 'NoSewa' is in the training set, this feature won't help in predictive modelling.
# We can then safely remove it.
all_data = all_data.drop(['Utilities'], axis=1)
# Functional : data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# KitchenQual: Only one NA value, and same as Electrical,
# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# Exterior1st and Exterior2nd :
# Again Both Exterior 1 & 2 have only one missing value. We will just substitute in the most common string
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# SaleType : Fill in again with most frequent which is "WD"
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# MSSubClass : Na most likely means No building class. We can replace missing values with None
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
########## Is there any remaining missing value ? ##########
#Check remaining missing values if any
print('\n\n\n\n')
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
############################################################
############## More features engeneering ############
##### Transforming some numerical variables that are really categorical #####
#MSSubClass=The building class
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#Changing OverallCond into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
#Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
# Label Encoding some categorical variables that may contain information in their ordering set
# --------------->>>>>>>> LabelEncoder
# --------------->>>>>>>> WHY ?!?!?!?!
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# shape
print('Shape all_data: {}'.format(all_data.shape))
######### Adding one more important feature ##########
# Since area related features are very important to determine house prices,
# we add one more feature which is the total area of basement, first and second floor areas of each house
# Adding total sqfootage feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
############## Skewed features ############
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
print(skewness.head(10))
##### Box Cox Transformation of (highly) skewed features #####
# --------------->>>>>>>> WHAT ?!?!?!?!
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
#all_data[skewed_features] = np.log1p(all_data[skewed_features])
# print('\n\n\n\t\t\tSKEW:')
# sk = pd.DataFrame({'Skew' :all_data[feat]})
# print(all_data.head(10))
# Getting dummy categorical features
all_data = pd.get_dummies(all_data)
print(all_data.shape)
train = all_data[:ntrain]
test = all_data[ntrain:]
############################################################
############## Modeling ############
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
|
[
"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt # Matlab-style plotting\nimport seaborn as sns\nimport warnings\nfrom scipy import stats\nfrom scipy.stats import norm, skew #for some statistics\nfrom subprocess import check_output\n\ncolor = sns.color_palette()\nsns.set_style('darkgrid')\n\n\t\t\t\t\t############\tBuild Environment \t############\n\n############################################\ndef ignore_warn(*args, **kwargs):\n pass\nwarnings.warn = ignore_warn \t\t\t\t\t\t\t\t\t\t# ignore annoying warning (from sklearn and seaborn)\npd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points\n# print(check_output([\"ls\", \"../data\"]).decode(\"utf8\")) \t\t\t\t# check the files available in the directory\n############################################\n\n\n#Now let's import and put the train and test datasets in pandas dataframe\ntrain = pd.read_csv('../data/train.csv')\ntest = pd.read_csv('../data/test.csv')\n##display the first five rows of the train dataset.\n# print(train.head(5))\n##display the first five rows of the test dataset.\n# print(test.head(5))\n\n\n############\t\tInro \t\t############\n############################################\n#check the numbers of samples and features\nprint(\"The train data size before dropping Id feature is : {} \".format(train.shape))\nprint(\"The test data size before dropping Id feature is : {} \".format(test.shape))\n\n#Save the 'Id' column\ntrain_ID = train['Id']\ntest_ID = test['Id']\nprint('\\n\\n')\nprint(\"The train ID :\\n {} \".format(np.array(train_ID)))\nprint(\"The test ID :\\n {} \".format(np.array(test_ID)))\n\n#Now drop the 'Id' colum since it's unnecessary for the prediction process.\ntrain.drop(\"Id\", axis = 1, inplace = True)\ntest.drop(\"Id\", axis = 1, inplace = True)\n\n#check again the data size after dropping the 'Id' variable\nprint(\"\\nThe train data size after dropping Id feature is : {} \".format(train.shape)) \nprint(\"The test data size after dropping Id feature is : {} \".format(test.shape))\n############################################\n\n\n\n\t\t\t\t\t############\tData Processing \t############\n\n################################################\n############## \tOutliers \t\t############\n\n# fig, ax = plt.subplots()\n# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])\n# plt.ylabel('SalePrice', fontsize=13)\n# plt.xlabel('GrLivArea', fontsize=13)\n\n\n#Deleting outliers\ntrain = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)\n\n#Check the graphic again\n# fig, ax = plt.subplots()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\tGRAPH 1\n# ax.scatter(train['GrLivArea'], train['SalePrice'])\n# plt.ylabel('SalePrice', fontsize=13)\n# plt.xlabel('GrLivArea', fontsize=13)\n\n\n############## Target variable \t#############\n\n# Uncomment to PLOT\n\n# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.\n\n# sns.distplot(train['SalePrice'] , fit=norm)\t\t\t\t\t\t\t\t\t\t\t#\tGRAPH 2\n\n# # Get the fitted parameters used by the function\n# (mu, sigma) = norm.fit(train['SalePrice'])\t\t\t\t\t\t\t\t\t\t\t# \tSTANDART\n# print( '\\n mu = {:.2f} and sigma = {:.2f}\\n'.format(mu, sigma))\n\n\n# #Now plot the distribution\n# plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)],\n# loc='best')\n# plt.ylabel('Frequency')\n# plt.title('SalePrice distribution')\n\n\n# #Get also the QQ-plot\n# fig = plt.figure()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\tGRAPH 3\n# res = stats.probplot(train['SalePrice'], plot=plt)\n\n\n# # The target variable is right skewed. As (linear) models love normally distributed data , \n# # we need to transform this variable and make it more normally distributed.\n\n################################################################################\n############## \tLog-transformation of the target variable \t\t############\n\n# Uncomment to PLOT\n\n# #We use the numpy fuction log1p which applies log(1+x) to all elements of the column\n# train[\"SalePrice\"] = np.log1p(train[\"SalePrice\"])\n\n# #Check the new distribution \n# sns.distplot(train['SalePrice'] , fit=norm)\n\n# # Get the fitted parameters used by the function\n# (mu, sigma) = norm.fit(train['SalePrice'])\n# print( '\\n mu = {:.2f} and sigma = {:.2f}\\n'.format(mu, sigma))\n\n\n# #Now plot the distribution\n# plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)],\n# loc='best')\n# plt.ylabel('Frequency')\n# plt.title('SalePrice distribution')\n\n# #Get also the QQ-plot\n# fig = plt.figure()\n# res = stats.probplot(train['SalePrice'], plot=plt)\n# plt.show()\n\n\n\n\n############################################################\n############## \tFeatures engineering\t\t############\n# let's first concatenate the train and test data in the same dataframe\n\nntrain = train.shape[0]\nntest = test.shape[0]\ny_train = train.SalePrice.values\nall_data = pd.concat((train, test)).reset_index(drop=True)\nall_data.drop(['SalePrice'], axis=1, inplace=True)\nprint(\"\\nall_data size is : {}\".format(all_data.shape))\n\n\n\n#####\tMissing Data. #####\n# Uncomment to PLOT\n\nall_data_na = (all_data.isnull().sum() / len(all_data)) * 100\nall_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]\nmissing_data = pd.DataFrame({'Missing Ratio' :all_data_na})\nprint(missing_data.head(20))\n\n\n# f, ax = plt.subplots(figsize=(15, 12))\t\t\t\t\t\t\t\t\t\t\t\t#\tGRAPH 4\n# plt.xticks(rotation='90')\n# sns.barplot(x=all_data_na.index, y=all_data_na)\n# plt.xlabel('Features', fontsize=15)\n# plt.ylabel('Percent of missing values', fontsize=15)\n# plt.title('Percent missing data by feature', fontsize=15)\n# plt.show()\n\n\n############################################################\n############## \t Data Correlation \t\t############\n\n#Correlation map to see how features are correlated with SalePrice\ncorrmat = train.corr()\nplt.subplots(figsize=(12,9))\nsns.heatmap(corrmat, vmax=0.9, square=True, linewidth = 1)\nplt.show()\n\n\n\n############################################################\n############## Imputing missing values \t############\n\n#\t--------------->>>>>>>>\t\tgroupby() and mode()\n\n\nall_data[\"PoolQC\"] = all_data[\"PoolQC\"].fillna(\"None\")\n\nall_data[\"MiscFeature\"] = all_data[\"MiscFeature\"].fillna(\"None\")\n\nall_data[\"Alley\"] = all_data[\"Alley\"].fillna(\"None\")\n\nall_data[\"Fence\"] = all_data[\"Fence\"].fillna(\"None\")\n\nall_data[\"FireplaceQu\"] = all_data[\"FireplaceQu\"].fillna(\"None\")\n\n\n# LotFrontage : Since the area of each street connected to the house property most likely have a similar area to other houses in its neighborhood , \n# we can fill in missing values by the median LotFrontage of the neighborhood.\n# Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood\nall_data[\"LotFrontage\"] = all_data.groupby(\"Neighborhood\")[\"LotFrontage\"].transform(lambda x: x.fillna(x.median()))\n\n# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None\nfor col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):\n\tall_data[col] = all_data[col].fillna('None')\n\n# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)\nfor col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):\n\tall_data[col] = all_data[col].fillna(0)\n\n# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath : \n# missing values are likely zero for having no basement\nfor col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):\n\tall_data[col] = all_data[col].fillna(0)\n\n# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 : \n# For all these categorical basement-related features, NaN means that there is no basement\nfor col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):\n all_data[col] = all_data[col].fillna('None')\n\n# MasVnrArea and MasVnrType : \n# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.\nall_data[\"MasVnrType\"] = all_data[\"MasVnrType\"].fillna(\"None\")\nall_data[\"MasVnrArea\"] = all_data[\"MasVnrArea\"].fillna(0)\n\n#####\t!!!!!\t#####\n# MSZoning (The general zoning classification) :\n# 'RL' is by far the most common value. So we can fill in missing values with 'RL'\nall_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])\n\n\n# Utilities : For this categorical feature all records are \"AllPub\", except for one \"NoSeWa\" and 2 NA . \n# Since the house with 'NoSewa' is in the training set, this feature won't help in predictive modelling. \n# We can then safely remove it.\nall_data = all_data.drop(['Utilities'], axis=1)\n\n# Functional : data description says NA means typical\nall_data[\"Functional\"] = all_data[\"Functional\"].fillna(\"Typ\")\n\n\n# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.\nall_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])\n\n# KitchenQual: Only one NA value, and same as Electrical, \n# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.\nall_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])\n\n# Exterior1st and Exterior2nd : \n# Again Both Exterior 1 & 2 have only one missing value. We will just substitute in the most common string\nall_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])\nall_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])\n\n# SaleType : Fill in again with most frequent which is \"WD\"\nall_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])\n\n# MSSubClass : Na most likely means No building class. We can replace missing values with None\nall_data['MSSubClass'] = all_data['MSSubClass'].fillna(\"None\")\n\n##########\tIs there any remaining missing value ?\t\t##########\n\n#Check remaining missing values if any \nprint('\\n\\n\\n\\n')\nall_data_na = (all_data.isnull().sum() / len(all_data)) * 100\nall_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)\nmissing_data = pd.DataFrame({'Missing Ratio' :all_data_na})\nmissing_data.head()\n\n\n############################################################\n############## More features engeneering \t############\n\n##### Transforming some numerical variables that are really categorical #####\n\n\n\n#MSSubClass=The building class\nall_data['MSSubClass'] = all_data['MSSubClass'].apply(str)\n\n\n#Changing OverallCond into a categorical variable\nall_data['OverallCond'] = all_data['OverallCond'].astype(str)\n\n\n#Year and month sold are transformed into categorical features.\nall_data['YrSold'] = all_data['YrSold'].astype(str)\nall_data['MoSold'] = all_data['MoSold'].astype(str)\n\n\n\n# Label Encoding some categorical variables that may contain information in their ordering set\n\n#\t--------------->>>>>>>>\t\tLabelEncoder\n#\t--------------->>>>>>>>\t\tWHY ?!?!?!?!\n\nfrom sklearn.preprocessing import LabelEncoder\ncols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond', \n 'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1', \n 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',\n 'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', \n 'YrSold', 'MoSold')\n# process columns, apply LabelEncoder to categorical features\nfor c in cols:\n lbl = LabelEncoder() \n lbl.fit(list(all_data[c].values)) \n all_data[c] = lbl.transform(list(all_data[c].values))\n\n# shape \nprint('Shape all_data: {}'.format(all_data.shape))\n\n\n######### \tAdding one more important feature \t##########\n\n# Since area related features are very important to determine house prices, \n# we add one more feature which is the total area of basement, first and second floor areas of each house\n\n# Adding total sqfootage feature \nall_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']\n\n\n############## Skewed features \t############\n\nnumeric_feats = all_data.dtypes[all_data.dtypes != \"object\"].index\n\n# Check the skew of all numerical features\nskewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)\nprint(\"\\nSkew in numerical features: \\n\")\nskewness = pd.DataFrame({'Skew' :skewed_feats})\nprint(skewness.head(10))\n\n##### \tBox Cox Transformation of (highly) skewed features \t#####\n#\t--------------->>>>>>>>\t\tWHAT ?!?!?!?!\n\nskewness = skewness[abs(skewness) > 0.75]\nprint(\"There are {} skewed numerical features to Box Cox transform\".format(skewness.shape[0]))\n\nfrom scipy.special import boxcox1p\nskewed_features = skewness.index\nlam = 0.15\nfor feat in skewed_features:\n #all_data[feat] += 1\n all_data[feat] = boxcox1p(all_data[feat], lam)\n \n#all_data[skewed_features] = np.log1p(all_data[skewed_features])\n# print('\\n\\n\\n\\t\\t\\tSKEW:')\n# sk = pd.DataFrame({'Skew' :all_data[feat]})\n# print(all_data.head(10))\n\n\n# \tGetting dummy categorical features\nall_data = pd.get_dummies(all_data)\nprint(all_data.shape)\n\ntrain = all_data[:ntrain]\ntest = all_data[ntrain:]\n\n\n\n\n############################################################\n############## \t\t Modeling \t\t\t############\n\nfrom sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport xgboost as xgb\nimport lightgbm as lgb\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nfrom scipy import stats\nfrom scipy.stats import norm, skew\nfrom subprocess import check_output\ncolor = sns.color_palette()\nsns.set_style('darkgrid')\n\n\ndef ignore_warn(*args, **kwargs):\n pass\n\n\nwarnings.warn = ignore_warn\npd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))\ntrain = pd.read_csv('../data/train.csv')\ntest = pd.read_csv('../data/test.csv')\nprint('The train data size before dropping Id feature is : {} '.format(\n train.shape))\nprint('The test data size before dropping Id feature is : {} '.format(test.\n shape))\ntrain_ID = train['Id']\ntest_ID = test['Id']\nprint('\\n\\n')\nprint(\"\"\"The train ID :\n {} \"\"\".format(np.array(train_ID)))\nprint(\"\"\"The test ID :\n {} \"\"\".format(np.array(test_ID)))\ntrain.drop('Id', axis=1, inplace=True)\ntest.drop('Id', axis=1, inplace=True)\nprint(\"\"\"\nThe train data size after dropping Id feature is : {} \"\"\".format(\n train.shape))\nprint('The test data size after dropping Id feature is : {} '.format(test.\n shape))\ntrain = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] <\n 300000)].index)\nntrain = train.shape[0]\nntest = test.shape[0]\ny_train = train.SalePrice.values\nall_data = pd.concat((train, test)).reset_index(drop=True)\nall_data.drop(['SalePrice'], axis=1, inplace=True)\nprint(\"\"\"\nall_data size is : {}\"\"\".format(all_data.shape))\nall_data_na = all_data.isnull().sum() / len(all_data) * 100\nall_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index\n ).sort_values(ascending=False)[:30]\nmissing_data = pd.DataFrame({'Missing Ratio': all_data_na})\nprint(missing_data.head(20))\ncorrmat = train.corr()\nplt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, vmax=0.9, square=True, linewidth=1)\nplt.show()\nall_data['PoolQC'] = all_data['PoolQC'].fillna('None')\nall_data['MiscFeature'] = all_data['MiscFeature'].fillna('None')\nall_data['Alley'] = all_data['Alley'].fillna('None')\nall_data['Fence'] = all_data['Fence'].fillna('None')\nall_data['FireplaceQu'] = all_data['FireplaceQu'].fillna('None')\nall_data['LotFrontage'] = all_data.groupby('Neighborhood')['LotFrontage'\n ].transform(lambda x: x.fillna(x.median()))\nfor col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):\n all_data[col] = all_data[col].fillna('None')\nfor col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):\n all_data[col] = all_data[col].fillna(0)\nfor col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',\n 'BsmtFullBath', 'BsmtHalfBath'):\n all_data[col] = all_data[col].fillna(0)\nfor col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',\n 'BsmtFinType2'):\n all_data[col] = all_data[col].fillna('None')\nall_data['MasVnrType'] = all_data['MasVnrType'].fillna('None')\nall_data['MasVnrArea'] = all_data['MasVnrArea'].fillna(0)\nall_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].\n mode()[0])\nall_data = all_data.drop(['Utilities'], axis=1)\nall_data['Functional'] = all_data['Functional'].fillna('Typ')\nall_data['Electrical'] = all_data['Electrical'].fillna(all_data[\n 'Electrical'].mode()[0])\nall_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data[\n 'KitchenQual'].mode()[0])\nall_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data[\n 'Exterior1st'].mode()[0])\nall_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data[\n 'Exterior2nd'].mode()[0])\nall_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].\n mode()[0])\nall_data['MSSubClass'] = all_data['MSSubClass'].fillna('None')\nprint('\\n\\n\\n\\n')\nall_data_na = all_data.isnull().sum() / len(all_data) * 100\nall_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index\n ).sort_values(ascending=False)\nmissing_data = pd.DataFrame({'Missing Ratio': all_data_na})\nmissing_data.head()\nall_data['MSSubClass'] = all_data['MSSubClass'].apply(str)\nall_data['OverallCond'] = all_data['OverallCond'].astype(str)\nall_data['YrSold'] = all_data['YrSold'].astype(str)\nall_data['MoSold'] = all_data['MoSold'].astype(str)\nfrom sklearn.preprocessing import LabelEncoder\ncols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',\n 'ExterQual', 'ExterCond', 'HeatingQC', 'PoolQC', 'KitchenQual',\n 'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure',\n 'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street',\n 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold')\nfor c in cols:\n lbl = LabelEncoder()\n lbl.fit(list(all_data[c].values))\n all_data[c] = lbl.transform(list(all_data[c].values))\nprint('Shape all_data: {}'.format(all_data.shape))\nall_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'\n ] + all_data['2ndFlrSF']\nnumeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index\nskewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())\n ).sort_values(ascending=False)\nprint(\"\"\"\nSkew in numerical features: \n\"\"\")\nskewness = pd.DataFrame({'Skew': skewed_feats})\nprint(skewness.head(10))\nskewness = skewness[abs(skewness) > 0.75]\nprint('There are {} skewed numerical features to Box Cox transform'.format(\n skewness.shape[0]))\nfrom scipy.special import boxcox1p\nskewed_features = skewness.index\nlam = 0.15\nfor feat in skewed_features:\n all_data[feat] = boxcox1p(all_data[feat], lam)\nall_data = pd.get_dummies(all_data)\nprint(all_data.shape)\ntrain = all_data[:ntrain]\ntest = all_data[ntrain:]\nfrom sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport xgboost as xgb\nimport lightgbm as lgb\n",
"<import token>\ncolor = sns.color_palette()\nsns.set_style('darkgrid')\n\n\ndef ignore_warn(*args, **kwargs):\n pass\n\n\nwarnings.warn = ignore_warn\npd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))\ntrain = pd.read_csv('../data/train.csv')\ntest = pd.read_csv('../data/test.csv')\nprint('The train data size before dropping Id feature is : {} '.format(\n train.shape))\nprint('The test data size before dropping Id feature is : {} '.format(test.\n shape))\ntrain_ID = train['Id']\ntest_ID = test['Id']\nprint('\\n\\n')\nprint(\"\"\"The train ID :\n {} \"\"\".format(np.array(train_ID)))\nprint(\"\"\"The test ID :\n {} \"\"\".format(np.array(test_ID)))\ntrain.drop('Id', axis=1, inplace=True)\ntest.drop('Id', axis=1, inplace=True)\nprint(\"\"\"\nThe train data size after dropping Id feature is : {} \"\"\".format(\n train.shape))\nprint('The test data size after dropping Id feature is : {} '.format(test.\n shape))\ntrain = train.drop(train[(train['GrLivArea'] > 4000) & (train['SalePrice'] <\n 300000)].index)\nntrain = train.shape[0]\nntest = test.shape[0]\ny_train = train.SalePrice.values\nall_data = pd.concat((train, test)).reset_index(drop=True)\nall_data.drop(['SalePrice'], axis=1, inplace=True)\nprint(\"\"\"\nall_data size is : {}\"\"\".format(all_data.shape))\nall_data_na = all_data.isnull().sum() / len(all_data) * 100\nall_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index\n ).sort_values(ascending=False)[:30]\nmissing_data = pd.DataFrame({'Missing Ratio': all_data_na})\nprint(missing_data.head(20))\ncorrmat = train.corr()\nplt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, vmax=0.9, square=True, linewidth=1)\nplt.show()\nall_data['PoolQC'] = all_data['PoolQC'].fillna('None')\nall_data['MiscFeature'] = all_data['MiscFeature'].fillna('None')\nall_data['Alley'] = all_data['Alley'].fillna('None')\nall_data['Fence'] = all_data['Fence'].fillna('None')\nall_data['FireplaceQu'] = all_data['FireplaceQu'].fillna('None')\nall_data['LotFrontage'] = all_data.groupby('Neighborhood')['LotFrontage'\n ].transform(lambda x: x.fillna(x.median()))\nfor col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):\n all_data[col] = all_data[col].fillna('None')\nfor col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):\n all_data[col] = all_data[col].fillna(0)\nfor col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',\n 'BsmtFullBath', 'BsmtHalfBath'):\n all_data[col] = all_data[col].fillna(0)\nfor col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',\n 'BsmtFinType2'):\n all_data[col] = all_data[col].fillna('None')\nall_data['MasVnrType'] = all_data['MasVnrType'].fillna('None')\nall_data['MasVnrArea'] = all_data['MasVnrArea'].fillna(0)\nall_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].\n mode()[0])\nall_data = all_data.drop(['Utilities'], axis=1)\nall_data['Functional'] = all_data['Functional'].fillna('Typ')\nall_data['Electrical'] = all_data['Electrical'].fillna(all_data[\n 'Electrical'].mode()[0])\nall_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data[\n 'KitchenQual'].mode()[0])\nall_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data[\n 'Exterior1st'].mode()[0])\nall_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data[\n 'Exterior2nd'].mode()[0])\nall_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].\n mode()[0])\nall_data['MSSubClass'] = all_data['MSSubClass'].fillna('None')\nprint('\\n\\n\\n\\n')\nall_data_na = all_data.isnull().sum() / len(all_data) * 100\nall_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index\n ).sort_values(ascending=False)\nmissing_data = pd.DataFrame({'Missing Ratio': all_data_na})\nmissing_data.head()\nall_data['MSSubClass'] = all_data['MSSubClass'].apply(str)\nall_data['OverallCond'] = all_data['OverallCond'].astype(str)\nall_data['YrSold'] = all_data['YrSold'].astype(str)\nall_data['MoSold'] = all_data['MoSold'].astype(str)\n<import token>\ncols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',\n 'ExterQual', 'ExterCond', 'HeatingQC', 'PoolQC', 'KitchenQual',\n 'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure',\n 'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street',\n 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold')\nfor c in cols:\n lbl = LabelEncoder()\n lbl.fit(list(all_data[c].values))\n all_data[c] = lbl.transform(list(all_data[c].values))\nprint('Shape all_data: {}'.format(all_data.shape))\nall_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'\n ] + all_data['2ndFlrSF']\nnumeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index\nskewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())\n ).sort_values(ascending=False)\nprint(\"\"\"\nSkew in numerical features: \n\"\"\")\nskewness = pd.DataFrame({'Skew': skewed_feats})\nprint(skewness.head(10))\nskewness = skewness[abs(skewness) > 0.75]\nprint('There are {} skewed numerical features to Box Cox transform'.format(\n skewness.shape[0]))\n<import token>\nskewed_features = skewness.index\nlam = 0.15\nfor feat in skewed_features:\n all_data[feat] = boxcox1p(all_data[feat], lam)\nall_data = pd.get_dummies(all_data)\nprint(all_data.shape)\ntrain = all_data[:ntrain]\ntest = all_data[ntrain:]\n<import token>\n",
"<import token>\n<assignment token>\nsns.set_style('darkgrid')\n\n\ndef ignore_warn(*args, **kwargs):\n pass\n\n\n<assignment token>\npd.set_option('display.float_format', lambda x: '{:.3f}'.format(x))\n<assignment token>\nprint('The train data size before dropping Id feature is : {} '.format(\n train.shape))\nprint('The test data size before dropping Id feature is : {} '.format(test.\n shape))\n<assignment token>\nprint('\\n\\n')\nprint(\"\"\"The train ID :\n {} \"\"\".format(np.array(train_ID)))\nprint(\"\"\"The test ID :\n {} \"\"\".format(np.array(test_ID)))\ntrain.drop('Id', axis=1, inplace=True)\ntest.drop('Id', axis=1, inplace=True)\nprint(\"\"\"\nThe train data size after dropping Id feature is : {} \"\"\".format(\n train.shape))\nprint('The test data size after dropping Id feature is : {} '.format(test.\n shape))\n<assignment token>\nall_data.drop(['SalePrice'], axis=1, inplace=True)\nprint(\"\"\"\nall_data size is : {}\"\"\".format(all_data.shape))\n<assignment token>\nprint(missing_data.head(20))\n<assignment token>\nplt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, vmax=0.9, square=True, linewidth=1)\nplt.show()\n<assignment token>\nfor col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):\n all_data[col] = all_data[col].fillna('None')\nfor col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):\n all_data[col] = all_data[col].fillna(0)\nfor col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',\n 'BsmtFullBath', 'BsmtHalfBath'):\n all_data[col] = all_data[col].fillna(0)\nfor col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',\n 'BsmtFinType2'):\n all_data[col] = all_data[col].fillna('None')\n<assignment token>\nprint('\\n\\n\\n\\n')\n<assignment token>\nmissing_data.head()\n<assignment token>\n<import token>\n<assignment token>\nfor c in cols:\n lbl = LabelEncoder()\n lbl.fit(list(all_data[c].values))\n all_data[c] = lbl.transform(list(all_data[c].values))\nprint('Shape all_data: {}'.format(all_data.shape))\n<assignment token>\nprint(\"\"\"\nSkew in numerical features: \n\"\"\")\n<assignment token>\nprint(skewness.head(10))\n<assignment token>\nprint('There are {} skewed numerical features to Box Cox transform'.format(\n skewness.shape[0]))\n<import token>\n<assignment token>\nfor feat in skewed_features:\n all_data[feat] = boxcox1p(all_data[feat], lam)\n<assignment token>\nprint(all_data.shape)\n<assignment token>\n<import token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef ignore_warn(*args, **kwargs):\n pass\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n"
] | false |
99,636 |
a8ec4687b322af5ac2e49e7a48f6feb611726531
|
#!/usr/bin/env python3
def han(cc): # has all numbers?
cc = str(cc)
index = [0,0,0,0,0,0,0,0,0,0]
for c in cc:
index[int(c)]+=1
if index == [0,1,1,1,1,1,1,1,1,1]:
return True
else:
return False
known = []
pro = 0
def store(cc):
if cc in known:
return
else:
global pro
pro += cc
known.append(cc)
tmax = 9876
xmax = 222
ymax = 2222
for x in range(1,10800):
for y in range(1,10800):
if y > x:
break
# if x * y > MAX:
# print("too big:", MAX)
# continue
a = han(str(x) + str (y) + str(x*y))
if a:
#if x < y:
# tmp = x
# x = y
# y = x
store(x*y)
print(a,': x',x,'z',y,"xz",x*y,"sum", pro)
break
r = han(140000100)
print(r)
print(known)
print('pro:',pro)
|
[
"#!/usr/bin/env python3\n\n\ndef han(cc): # has all numbers?\n cc = str(cc)\n index = [0,0,0,0,0,0,0,0,0,0]\n\n for c in cc:\n index[int(c)]+=1\n if index == [0,1,1,1,1,1,1,1,1,1]:\n return True\n else:\n return False\n\n\nknown = []\npro = 0\n\ndef store(cc):\n if cc in known:\n return\n else:\n global pro\n pro += cc\n known.append(cc)\n\n\n\ntmax = 9876\nxmax = 222\nymax = 2222\n\n\nfor x in range(1,10800):\n for y in range(1,10800):\n if y > x:\n break\n # if x * y > MAX:\n # print(\"too big:\", MAX)\n # continue\n \n a = han(str(x) + str (y) + str(x*y))\n if a:\n #if x < y:\n # tmp = x\n # x = y\n # y = x\n\n store(x*y)\n print(a,': x',x,'z',y,\"xz\",x*y,\"sum\", pro)\n break\n \n\nr = han(140000100)\nprint(r)\n\nprint(known)\nprint('pro:',pro)\n",
"def han(cc):\n cc = str(cc)\n index = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for c in cc:\n index[int(c)] += 1\n if index == [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]:\n return True\n else:\n return False\n\n\nknown = []\npro = 0\n\n\ndef store(cc):\n if cc in known:\n return\n else:\n global pro\n pro += cc\n known.append(cc)\n\n\ntmax = 9876\nxmax = 222\nymax = 2222\nfor x in range(1, 10800):\n for y in range(1, 10800):\n if y > x:\n break\n a = han(str(x) + str(y) + str(x * y))\n if a:\n store(x * y)\n print(a, ': x', x, 'z', y, 'xz', x * y, 'sum', pro)\n break\nr = han(140000100)\nprint(r)\nprint(known)\nprint('pro:', pro)\n",
"def han(cc):\n cc = str(cc)\n index = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for c in cc:\n index[int(c)] += 1\n if index == [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]:\n return True\n else:\n return False\n\n\n<assignment token>\n\n\ndef store(cc):\n if cc in known:\n return\n else:\n global pro\n pro += cc\n known.append(cc)\n\n\n<assignment token>\nfor x in range(1, 10800):\n for y in range(1, 10800):\n if y > x:\n break\n a = han(str(x) + str(y) + str(x * y))\n if a:\n store(x * y)\n print(a, ': x', x, 'z', y, 'xz', x * y, 'sum', pro)\n break\n<assignment token>\nprint(r)\nprint(known)\nprint('pro:', pro)\n",
"def han(cc):\n cc = str(cc)\n index = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for c in cc:\n index[int(c)] += 1\n if index == [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]:\n return True\n else:\n return False\n\n\n<assignment token>\n\n\ndef store(cc):\n if cc in known:\n return\n else:\n global pro\n pro += cc\n known.append(cc)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"def han(cc):\n cc = str(cc)\n index = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for c in cc:\n index[int(c)] += 1\n if index == [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]:\n return True\n else:\n return False\n\n\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,637 |
6c900d91550c1d156b252f48828b544fbb598ecf
|
# Generated by Django 2.2.1 on 2019-06-02 18:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('design', '0003_auto_20190531_1947'),
]
operations = [
migrations.AlterField(
model_name='gear',
name='Np',
field=models.FloatField(choices=[(12, 12), (18, 18), (32, 32)], null=True, verbose_name='dientes del piñón'),
),
]
|
[
"# Generated by Django 2.2.1 on 2019-06-02 18:26\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('design', '0003_auto_20190531_1947'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='gear',\n name='Np',\n field=models.FloatField(choices=[(12, 12), (18, 18), (32, 32)], null=True, verbose_name='dientes del piñón'),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('design', '0003_auto_20190531_1947')]\n operations = [migrations.AlterField(model_name='gear', name='Np', field\n =models.FloatField(choices=[(12, 12), (18, 18), (32, 32)], null=\n True, verbose_name='dientes del piñón'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('design', '0003_auto_20190531_1947')]\n operations = [migrations.AlterField(model_name='gear', name='Np', field\n =models.FloatField(choices=[(12, 12), (18, 18), (32, 32)], null=\n True, verbose_name='dientes del piñón'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
99,638 |
cb2d380a4b16b3c826125bb98bc9eacda96b2f5a
|
from django.db import models, forms
from datetime import datetime, timedelta
def deadline():
return datetime.today() + timedelta(days=30)
class Checkout(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
code = models.CharField(max_length=30, default='null', unique=True)
email = models.EmailField(max_length=30)
checkout_date = models.DateField(default=datetime.today)
due_date = models.DateField(default=deadline)
year = models.CharField(max_length=4, choices=[('2019', '2019'), ('2020', '2020'), ('2021', '2021'), ('2022', '2022')])
def __str__(self):
return str(self.id - 3 + 1000)
class Filter(forms.Form):
query = forms.CharField(max_length=30)
|
[
"from django.db import models, forms\r\nfrom datetime import datetime, timedelta\r\n\r\ndef deadline():\r\n return datetime.today() + timedelta(days=30)\r\n\r\nclass Checkout(models.Model):\r\n first_name = models.CharField(max_length=20)\r\n last_name = models.CharField(max_length=20)\r\n code = models.CharField(max_length=30, default='null', unique=True)\r\n email = models.EmailField(max_length=30)\r\n checkout_date = models.DateField(default=datetime.today)\r\n due_date = models.DateField(default=deadline)\r\n year = models.CharField(max_length=4, choices=[('2019', '2019'), ('2020', '2020'), ('2021', '2021'), ('2022', '2022')])\r\n\r\n def __str__(self):\r\n return str(self.id - 3 + 1000)\r\n\r\n\r\nclass Filter(forms.Form):\r\n query = forms.CharField(max_length=30)",
"from django.db import models, forms\nfrom datetime import datetime, timedelta\n\n\ndef deadline():\n return datetime.today() + timedelta(days=30)\n\n\nclass Checkout(models.Model):\n first_name = models.CharField(max_length=20)\n last_name = models.CharField(max_length=20)\n code = models.CharField(max_length=30, default='null', unique=True)\n email = models.EmailField(max_length=30)\n checkout_date = models.DateField(default=datetime.today)\n due_date = models.DateField(default=deadline)\n year = models.CharField(max_length=4, choices=[('2019', '2019'), (\n '2020', '2020'), ('2021', '2021'), ('2022', '2022')])\n\n def __str__(self):\n return str(self.id - 3 + 1000)\n\n\nclass Filter(forms.Form):\n query = forms.CharField(max_length=30)\n",
"<import token>\n\n\ndef deadline():\n return datetime.today() + timedelta(days=30)\n\n\nclass Checkout(models.Model):\n first_name = models.CharField(max_length=20)\n last_name = models.CharField(max_length=20)\n code = models.CharField(max_length=30, default='null', unique=True)\n email = models.EmailField(max_length=30)\n checkout_date = models.DateField(default=datetime.today)\n due_date = models.DateField(default=deadline)\n year = models.CharField(max_length=4, choices=[('2019', '2019'), (\n '2020', '2020'), ('2021', '2021'), ('2022', '2022')])\n\n def __str__(self):\n return str(self.id - 3 + 1000)\n\n\nclass Filter(forms.Form):\n query = forms.CharField(max_length=30)\n",
"<import token>\n<function token>\n\n\nclass Checkout(models.Model):\n first_name = models.CharField(max_length=20)\n last_name = models.CharField(max_length=20)\n code = models.CharField(max_length=30, default='null', unique=True)\n email = models.EmailField(max_length=30)\n checkout_date = models.DateField(default=datetime.today)\n due_date = models.DateField(default=deadline)\n year = models.CharField(max_length=4, choices=[('2019', '2019'), (\n '2020', '2020'), ('2021', '2021'), ('2022', '2022')])\n\n def __str__(self):\n return str(self.id - 3 + 1000)\n\n\nclass Filter(forms.Form):\n query = forms.CharField(max_length=30)\n",
"<import token>\n<function token>\n\n\nclass Checkout(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.id - 3 + 1000)\n\n\nclass Filter(forms.Form):\n query = forms.CharField(max_length=30)\n",
"<import token>\n<function token>\n\n\nclass Checkout(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Filter(forms.Form):\n query = forms.CharField(max_length=30)\n",
"<import token>\n<function token>\n<class token>\n\n\nclass Filter(forms.Form):\n query = forms.CharField(max_length=30)\n",
"<import token>\n<function token>\n<class token>\n\n\nclass Filter(forms.Form):\n <assignment token>\n",
"<import token>\n<function token>\n<class token>\n<class token>\n"
] | false |
99,639 |
e476d068f09f4b26af305d3cdfd3d445a7a27c2b
|
def chees_and_crackers(chees_count, boxes_of_crackers):
print "You have %d cheeses!" % chees_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for part!"
print "Get the blanket.\n"
print "We can just give the function numbers directly..."
chees_and_crackers(10,20)
print "or , we can use variables from our script:"
a = 15
b = 20
chees_and_crackers(a,b)
print "We can even do math inside."
chees_and_crackers(10+2,20+4)
|
[
"\ndef chees_and_crackers(chees_count, boxes_of_crackers):\n print \"You have %d cheeses!\" % chees_count\n print \"You have %d boxes of crackers!\" % boxes_of_crackers\n print \"Man that's enough for part!\"\n print \"Get the blanket.\\n\"\n\t\n\t\nprint \"We can just give the function numbers directly...\"\nchees_and_crackers(10,20)\n\nprint \"or , we can use variables from our script:\"\na = 15\nb = 20\n\nchees_and_crackers(a,b)\n\nprint \"We can even do math inside.\"\nchees_and_crackers(10+2,20+4)\n"
] | true |
99,640 |
d18e820f98f733ba7ea6708b3d02c99295daf2b1
|
# coding = utf-8
'''
@author = super_fazai
@File : demo.py
@Time : 2017/8/16 17:35
@connect : [email protected]
'''
"""
很多时候数据读写, 不一定是文件, 也可能在内存读写
StringIO顾名思义就是在内存中读写str
"""
from io import StringIO
f = StringIO()
f.write('hello')
print(f)
f.write(' ')
print(f)
f.write('world!')
print(f.getvalue()) # 获取写入后的str
print('分界线'.center(40, '-'))
f = StringIO('hello!\nhi!\ngoodbye!')
while True:
s = f.readline()
if s == '':
break
print(s.strip())
|
[
"# coding = utf-8\n\n'''\n@author = super_fazai\n@File : demo.py\n@Time : 2017/8/16 17:35\n@connect : [email protected]\n'''\n\n\"\"\"\n很多时候数据读写, 不一定是文件, 也可能在内存读写\n\nStringIO顾名思义就是在内存中读写str\n\"\"\"\n\nfrom io import StringIO\n\nf = StringIO()\nf.write('hello')\nprint(f)\nf.write(' ')\nprint(f)\nf.write('world!')\nprint(f.getvalue()) # 获取写入后的str\n\nprint('分界线'.center(40, '-'))\n\nf = StringIO('hello!\\nhi!\\ngoodbye!')\nwhile True:\n s = f.readline()\n if s == '':\n break\n print(s.strip())",
"<docstring token>\nfrom io import StringIO\nf = StringIO()\nf.write('hello')\nprint(f)\nf.write(' ')\nprint(f)\nf.write('world!')\nprint(f.getvalue())\nprint('分界线'.center(40, '-'))\nf = StringIO(\"\"\"hello!\nhi!\ngoodbye!\"\"\")\nwhile True:\n s = f.readline()\n if s == '':\n break\n print(s.strip())\n",
"<docstring token>\n<import token>\nf = StringIO()\nf.write('hello')\nprint(f)\nf.write(' ')\nprint(f)\nf.write('world!')\nprint(f.getvalue())\nprint('分界线'.center(40, '-'))\nf = StringIO(\"\"\"hello!\nhi!\ngoodbye!\"\"\")\nwhile True:\n s = f.readline()\n if s == '':\n break\n print(s.strip())\n",
"<docstring token>\n<import token>\n<assignment token>\nf.write('hello')\nprint(f)\nf.write(' ')\nprint(f)\nf.write('world!')\nprint(f.getvalue())\nprint('分界线'.center(40, '-'))\n<assignment token>\nwhile True:\n s = f.readline()\n if s == '':\n break\n print(s.strip())\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,641 |
0fa096f9a2b0e23af95a942433d559755de6ada7
|
################################################################################
import datetime, os, time, argparse, multiprocessing, subprocess
from webcam import Webcam
from gpio import GPIO
DEFAULT_RESOLUTION = "1600x1200"
#DEFAULT_PNG_COMPRESSION = 9 # (-1,0-10)
DEFAULT_FRAME_SKIP = 0
USERHOME_PATH = os.path.expanduser("~") #should be portable
DEFAULT_OUTPUT_PATH = os.sep.join((USERHOME_PATH,"photos"))
DEFAULT_VERBOSE = True
SLEEP_TIME = 0.1 #seconds
SNAP_BUTTON_PIN = 44
SNAP_LED_PIN = 26
INPUT_PINS = [SNAP_BUTTON_PIN]
OUTPUT_PINS = [SNAP_LED_PIN]
################################################################################
################################################################################
class Application:
def __init__(self, **kwargs):
self.verbose = kwargs.get('verbose', DEFAULT_VERBOSE)
self.webcam = Webcam(**kwargs)
self.gpio = GPIO(inputs = INPUT_PINS, outputs = OUTPUT_PINS)
def main_loop(self):
i = 0
try:
self.gpio.export()
while True:
button_state = self.gpio[SNAP_BUTTON_PIN]
self.gpio[SNAP_LED_PIN] = button_state
if button_state:
dt = datetime.datetime.now()
filename_prefix = dt.strftime("%Y-%m-%d_%H_%M_%S")
filename_suffix = "_img%03d" % i
self.webcam.take_photo(filename_prefix = filename_prefix,
filename_suffix = filename_suffix,
blocking = True,
)
self.gpio[SNAP_LED_PIN] = button_state
time.sleep(SLEEP_TIME)
except KeyboardInterrupt:
if self.verbose:
print "user aborted capture...goodbye"
finally:
self.gpio.unexport()
################################################################################
# MAIN
################################################################################
def main():
import argparse
parser = argparse.ArgumentParser()
#optional arguments
parser.add_argument("-r", "--resolution",
help = "set the resolution of the camera",
default = DEFAULT_RESOLUTION,
)
# parser.add_argument("-c", "--png_compression",
# help = "level of PNG compression (-1,0-10)",
# type = int,
# choices = (-1,0,1,2,3,4,5,6,7,8,9,10),
# default = DEFAULT_PNG_COMPRESSION,
# )
parser.add_argument("-f", "--frame_skip",
help = "skip number of frames",
type = int,
default = DEFAULT_FRAME_SKIP,
)
parser.add_argument("-o", "--output_path",
help = "path for img output",
default = DEFAULT_OUTPUT_PATH,
)
parser.add_argument("-v", "--verbose",
help="increase output verbosity",
action="store_true",
default = DEFAULT_VERBOSE,
)
args = parser.parse_args()
#apply configuration arguments to constructor
app = Application(resolution = args.resolution,
#png_compression = args.png_compression,
frame_skip = args.frame_skip,
output_path = args.output_path,
verbose = args.verbose,
)
app.main_loop()
#if this module is run directly
if __name__ == "__main__":
main()
|
[
"################################################################################\nimport datetime, os, time, argparse, multiprocessing, subprocess\nfrom webcam import Webcam\nfrom gpio import GPIO\n\nDEFAULT_RESOLUTION = \"1600x1200\"\n#DEFAULT_PNG_COMPRESSION = 9 # (-1,0-10)\nDEFAULT_FRAME_SKIP = 0\nUSERHOME_PATH = os.path.expanduser(\"~\") #should be portable\nDEFAULT_OUTPUT_PATH = os.sep.join((USERHOME_PATH,\"photos\"))\nDEFAULT_VERBOSE = True\nSLEEP_TIME = 0.1 #seconds\nSNAP_BUTTON_PIN = 44\nSNAP_LED_PIN = 26\nINPUT_PINS = [SNAP_BUTTON_PIN]\nOUTPUT_PINS = [SNAP_LED_PIN]\n################################################################################\n\n\n################################################################################\nclass Application:\n def __init__(self, **kwargs):\n self.verbose = kwargs.get('verbose', DEFAULT_VERBOSE)\n self.webcam = Webcam(**kwargs)\n self.gpio = GPIO(inputs = INPUT_PINS, outputs = OUTPUT_PINS)\n \n \n def main_loop(self):\n i = 0\n try:\n self.gpio.export()\n while True:\n button_state = self.gpio[SNAP_BUTTON_PIN]\n self.gpio[SNAP_LED_PIN] = button_state\n if button_state:\n dt = datetime.datetime.now()\n filename_prefix = dt.strftime(\"%Y-%m-%d_%H_%M_%S\")\n filename_suffix = \"_img%03d\" % i\n self.webcam.take_photo(filename_prefix = filename_prefix,\n filename_suffix = filename_suffix,\n blocking = True,\n )\n self.gpio[SNAP_LED_PIN] = button_state\n time.sleep(SLEEP_TIME)\n except KeyboardInterrupt:\n if self.verbose:\n print \"user aborted capture...goodbye\"\n finally:\n self.gpio.unexport()\n\n################################################################################\n# MAIN\n################################################################################\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n #optional arguments\n parser.add_argument(\"-r\", \"--resolution\",\n help = \"set the resolution of the camera\",\n default = DEFAULT_RESOLUTION,\n )\n# parser.add_argument(\"-c\", \"--png_compression\",\n# help = \"level of PNG compression (-1,0-10)\",\n# type = int,\n# choices = (-1,0,1,2,3,4,5,6,7,8,9,10),\n# default = DEFAULT_PNG_COMPRESSION,\n# )\n parser.add_argument(\"-f\", \"--frame_skip\",\n help = \"skip number of frames\",\n type = int,\n default = DEFAULT_FRAME_SKIP,\n )\n parser.add_argument(\"-o\", \"--output_path\",\n help = \"path for img output\",\n default = DEFAULT_OUTPUT_PATH,\n )\n parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\",\n default = DEFAULT_VERBOSE,\n )\n args = parser.parse_args()\n #apply configuration arguments to constructor\n app = Application(resolution = args.resolution,\n #png_compression = args.png_compression,\n frame_skip = args.frame_skip,\n output_path = args.output_path,\n verbose = args.verbose,\n )\n app.main_loop()\n \n#if this module is run directly\nif __name__ == \"__main__\":\n main()\n"
] | true |
99,642 |
ca10ad0c1ed6e07e2ad72e98ccbb3376d176a88c
|
"""
Create a function that takes a list of numbers and returns the **sum** of all
**prime numbers** in the list.
### Examples
sum_primes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) ➞ 17
sum_primes([2, 3, 4, 11, 20, 50, 71]) ➞ 87
sum_primes([]) ➞ None
### Notes
* Given numbers won't exceed 101.
* A prime number is a number which has exactly two divisors.
"""
import math
def sum_primes(series):
if not series:
return None
primes = primes_upto(max(series))
return sum(element for element in series if element in primes)
def primes_upto(n):
if n <= 1:
return list()
return [number for number in range(2, n+1) if is_prime(number)]
def is_prime(n):
if n <= 1:
return False
elif n <= 3:
return True
for i in range(2, int(math.sqrt(n)+1)):
if n % i == 0:
return False
return True
|
[
"\"\"\"\r\n\n\nCreate a function that takes a list of numbers and returns the **sum** of all\n**prime numbers** in the list.\n\n### Examples\n\n sum_primes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) ➞ 17\n \n sum_primes([2, 3, 4, 11, 20, 50, 71]) ➞ 87\n \n sum_primes([]) ➞ None\n\n### Notes\n\n * Given numbers won't exceed 101.\n * A prime number is a number which has exactly two divisors.\n\n\"\"\"\r\n\nimport math\ndef sum_primes(series):\n if not series:\n return None\n primes = primes_upto(max(series))\n return sum(element for element in series if element in primes)\n\ndef primes_upto(n):\n if n <= 1:\n return list() \n return [number for number in range(2, n+1) if is_prime(number)]\n\n\ndef is_prime(n):\n if n <= 1:\n return False\n elif n <= 3:\n return True\n\n for i in range(2, int(math.sqrt(n)+1)):\n if n % i == 0:\n return False\n return True\n\n"
] | true |
99,643 |
1404355c69fc3c23de8531131974f31978873355
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import tct
from os.path import exists as ospe
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
workdir = params['workdir']
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Helper functions
# --------------------------------------------------
def lookup(D, *keys, **kwdargs):
result = tct.deepget(D, *keys, **kwdargs)
loglist.append((keys, result))
return result
# ==================================================
# define
# --------------------------------------------------
remove_static_folder_from_html_done = None
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
build_html_folder = lookup(milestones, 'build_html_folder', default=None)
build_singlehtml_folder = lookup(milestones, 'build_singlehtml_folder',
default=None)
replace_static_in_html_done = lookup(milestones,
'replace_static_in_html_done',
default=None)
theme_module_path = lookup(milestones, 'theme_module_path')
if not (1
and (build_html_folder or build_singlehtml_folder)
and replace_static_in_html_done
and theme_module_path
):
CONTINUE = -2
reason = 'Bad PARAMS or nothing to do'
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append(reason)
# ==================================================
# work
# --------------------------------------------------
if exitcode == CONTINUE:
statics_to_keep = milestones.get('statics_to_keep', [])
for build_folder in [build_html_folder, build_singlehtml_folder]:
if not build_folder:
continue
startfolder = '_static'
fixed_part_length = len(build_folder)
fpath = os.path.join(build_folder, startfolder)
if os.path.exists(fpath):
for top, dirs, files in os.walk(fpath, topdown=False):
for file in files:
topfile = top + '/' + file
relfile = topfile[fixed_part_length+1:]
themefile = theme_module_path + '/' + relfile[1:]
if not (relfile in statics_to_keep) and ospe(themefile):
os.remove(topfile)
if not os.listdir(top):
os.rmdir(top)
loglist.append('%s, %s' % ('remove', fpath))
remove_static_folder_from_html_done = 1
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if remove_static_folder_from_html_done:
result['MILESTONES'].append({
'remove_static_folder_from_html_done':
remove_static_folder_from_html_done})
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
|
[
"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport tct\n\nfrom os.path import exists as ospe\n\nparams = tct.readjson(sys.argv[1])\nbinabspath = sys.argv[2]\nfacts = tct.readjson(params['factsfile'])\nmilestones = tct.readjson(params['milestonesfile'])\nreason = ''\nresultfile = params['resultfile']\nresult = tct.readjson(resultfile)\nloglist = result['loglist'] = result.get('loglist', [])\ntoolname = params['toolname']\ntoolname_pure = params['toolname_pure']\nworkdir = params['workdir']\nexitcode = CONTINUE = 0\n\n\n# ==================================================\n# Make a copy of milestones for later inspection?\n# --------------------------------------------------\n\nif 0 or milestones.get('debug_always_make_milestones_snapshot'):\n tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])\n\n\n# ==================================================\n# Helper functions\n# --------------------------------------------------\n\ndef lookup(D, *keys, **kwdargs):\n result = tct.deepget(D, *keys, **kwdargs)\n loglist.append((keys, result))\n return result\n\n\n# ==================================================\n# define\n# --------------------------------------------------\n\nremove_static_folder_from_html_done = None\nxeq_name_cnt = 0\n\n\n# ==================================================\n# Check params\n# --------------------------------------------------\n\nif exitcode == CONTINUE:\n loglist.append('CHECK PARAMS')\n\n build_html_folder = lookup(milestones, 'build_html_folder', default=None)\n build_singlehtml_folder = lookup(milestones, 'build_singlehtml_folder',\n default=None)\n replace_static_in_html_done = lookup(milestones,\n 'replace_static_in_html_done',\n default=None)\n theme_module_path = lookup(milestones, 'theme_module_path')\n if not (1\n and (build_html_folder or build_singlehtml_folder)\n and replace_static_in_html_done\n and theme_module_path\n ):\n CONTINUE = -2\n reason = 'Bad PARAMS or nothing to do'\n\nif exitcode == CONTINUE:\n loglist.append('PARAMS are ok')\nelse:\n loglist.append(reason)\n\n\n# ==================================================\n# work\n# --------------------------------------------------\n\nif exitcode == CONTINUE:\n\n statics_to_keep = milestones.get('statics_to_keep', [])\n for build_folder in [build_html_folder, build_singlehtml_folder]:\n if not build_folder:\n continue\n startfolder = '_static'\n fixed_part_length = len(build_folder)\n fpath = os.path.join(build_folder, startfolder)\n if os.path.exists(fpath):\n for top, dirs, files in os.walk(fpath, topdown=False):\n for file in files:\n topfile = top + '/' + file\n relfile = topfile[fixed_part_length+1:]\n themefile = theme_module_path + '/' + relfile[1:]\n if not (relfile in statics_to_keep) and ospe(themefile):\n os.remove(topfile)\n if not os.listdir(top):\n os.rmdir(top)\n\n loglist.append('%s, %s' % ('remove', fpath))\n remove_static_folder_from_html_done = 1\n\n\n# ==================================================\n# Set MILESTONE\n# --------------------------------------------------\n\nif remove_static_folder_from_html_done:\n result['MILESTONES'].append({\n 'remove_static_folder_from_html_done':\n remove_static_folder_from_html_done})\n\n\n# ==================================================\n# save result\n# --------------------------------------------------\n\ntct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)\n\n\n# ==================================================\n# Return with proper exitcode\n# --------------------------------------------------\n\nsys.exit(exitcode)\n",
"from __future__ import print_function\nfrom __future__ import absolute_import\nimport os\nimport sys\nimport tct\nfrom os.path import exists as ospe\nparams = tct.readjson(sys.argv[1])\nbinabspath = sys.argv[2]\nfacts = tct.readjson(params['factsfile'])\nmilestones = tct.readjson(params['milestonesfile'])\nreason = ''\nresultfile = params['resultfile']\nresult = tct.readjson(resultfile)\nloglist = result['loglist'] = result.get('loglist', [])\ntoolname = params['toolname']\ntoolname_pure = params['toolname_pure']\nworkdir = params['workdir']\nexitcode = CONTINUE = 0\nif 0 or milestones.get('debug_always_make_milestones_snapshot'):\n tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])\n\n\ndef lookup(D, *keys, **kwdargs):\n result = tct.deepget(D, *keys, **kwdargs)\n loglist.append((keys, result))\n return result\n\n\nremove_static_folder_from_html_done = None\nxeq_name_cnt = 0\nif exitcode == CONTINUE:\n loglist.append('CHECK PARAMS')\n build_html_folder = lookup(milestones, 'build_html_folder', default=None)\n build_singlehtml_folder = lookup(milestones, 'build_singlehtml_folder',\n default=None)\n replace_static_in_html_done = lookup(milestones,\n 'replace_static_in_html_done', default=None)\n theme_module_path = lookup(milestones, 'theme_module_path')\n if not (1 and (build_html_folder or build_singlehtml_folder) and\n replace_static_in_html_done and theme_module_path):\n CONTINUE = -2\n reason = 'Bad PARAMS or nothing to do'\nif exitcode == CONTINUE:\n loglist.append('PARAMS are ok')\nelse:\n loglist.append(reason)\nif exitcode == CONTINUE:\n statics_to_keep = milestones.get('statics_to_keep', [])\n for build_folder in [build_html_folder, build_singlehtml_folder]:\n if not build_folder:\n continue\n startfolder = '_static'\n fixed_part_length = len(build_folder)\n fpath = os.path.join(build_folder, startfolder)\n if os.path.exists(fpath):\n for top, dirs, files in os.walk(fpath, topdown=False):\n for file in files:\n topfile = top + '/' + file\n relfile = topfile[fixed_part_length + 1:]\n themefile = theme_module_path + '/' + relfile[1:]\n if not relfile in statics_to_keep and ospe(themefile):\n os.remove(topfile)\n if not os.listdir(top):\n os.rmdir(top)\n loglist.append('%s, %s' % ('remove', fpath))\n remove_static_folder_from_html_done = 1\nif remove_static_folder_from_html_done:\n result['MILESTONES'].append({'remove_static_folder_from_html_done':\n remove_static_folder_from_html_done})\ntct.save_the_result(result, resultfile, params, facts, milestones, exitcode,\n CONTINUE, reason)\nsys.exit(exitcode)\n",
"<import token>\nparams = tct.readjson(sys.argv[1])\nbinabspath = sys.argv[2]\nfacts = tct.readjson(params['factsfile'])\nmilestones = tct.readjson(params['milestonesfile'])\nreason = ''\nresultfile = params['resultfile']\nresult = tct.readjson(resultfile)\nloglist = result['loglist'] = result.get('loglist', [])\ntoolname = params['toolname']\ntoolname_pure = params['toolname_pure']\nworkdir = params['workdir']\nexitcode = CONTINUE = 0\nif 0 or milestones.get('debug_always_make_milestones_snapshot'):\n tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])\n\n\ndef lookup(D, *keys, **kwdargs):\n result = tct.deepget(D, *keys, **kwdargs)\n loglist.append((keys, result))\n return result\n\n\nremove_static_folder_from_html_done = None\nxeq_name_cnt = 0\nif exitcode == CONTINUE:\n loglist.append('CHECK PARAMS')\n build_html_folder = lookup(milestones, 'build_html_folder', default=None)\n build_singlehtml_folder = lookup(milestones, 'build_singlehtml_folder',\n default=None)\n replace_static_in_html_done = lookup(milestones,\n 'replace_static_in_html_done', default=None)\n theme_module_path = lookup(milestones, 'theme_module_path')\n if not (1 and (build_html_folder or build_singlehtml_folder) and\n replace_static_in_html_done and theme_module_path):\n CONTINUE = -2\n reason = 'Bad PARAMS or nothing to do'\nif exitcode == CONTINUE:\n loglist.append('PARAMS are ok')\nelse:\n loglist.append(reason)\nif exitcode == CONTINUE:\n statics_to_keep = milestones.get('statics_to_keep', [])\n for build_folder in [build_html_folder, build_singlehtml_folder]:\n if not build_folder:\n continue\n startfolder = '_static'\n fixed_part_length = len(build_folder)\n fpath = os.path.join(build_folder, startfolder)\n if os.path.exists(fpath):\n for top, dirs, files in os.walk(fpath, topdown=False):\n for file in files:\n topfile = top + '/' + file\n relfile = topfile[fixed_part_length + 1:]\n themefile = theme_module_path + '/' + relfile[1:]\n if not relfile in statics_to_keep and ospe(themefile):\n os.remove(topfile)\n if not os.listdir(top):\n os.rmdir(top)\n loglist.append('%s, %s' % ('remove', fpath))\n remove_static_folder_from_html_done = 1\nif remove_static_folder_from_html_done:\n result['MILESTONES'].append({'remove_static_folder_from_html_done':\n remove_static_folder_from_html_done})\ntct.save_the_result(result, resultfile, params, facts, milestones, exitcode,\n CONTINUE, reason)\nsys.exit(exitcode)\n",
"<import token>\n<assignment token>\nif 0 or milestones.get('debug_always_make_milestones_snapshot'):\n tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])\n\n\ndef lookup(D, *keys, **kwdargs):\n result = tct.deepget(D, *keys, **kwdargs)\n loglist.append((keys, result))\n return result\n\n\n<assignment token>\nif exitcode == CONTINUE:\n loglist.append('CHECK PARAMS')\n build_html_folder = lookup(milestones, 'build_html_folder', default=None)\n build_singlehtml_folder = lookup(milestones, 'build_singlehtml_folder',\n default=None)\n replace_static_in_html_done = lookup(milestones,\n 'replace_static_in_html_done', default=None)\n theme_module_path = lookup(milestones, 'theme_module_path')\n if not (1 and (build_html_folder or build_singlehtml_folder) and\n replace_static_in_html_done and theme_module_path):\n CONTINUE = -2\n reason = 'Bad PARAMS or nothing to do'\nif exitcode == CONTINUE:\n loglist.append('PARAMS are ok')\nelse:\n loglist.append(reason)\nif exitcode == CONTINUE:\n statics_to_keep = milestones.get('statics_to_keep', [])\n for build_folder in [build_html_folder, build_singlehtml_folder]:\n if not build_folder:\n continue\n startfolder = '_static'\n fixed_part_length = len(build_folder)\n fpath = os.path.join(build_folder, startfolder)\n if os.path.exists(fpath):\n for top, dirs, files in os.walk(fpath, topdown=False):\n for file in files:\n topfile = top + '/' + file\n relfile = topfile[fixed_part_length + 1:]\n themefile = theme_module_path + '/' + relfile[1:]\n if not relfile in statics_to_keep and ospe(themefile):\n os.remove(topfile)\n if not os.listdir(top):\n os.rmdir(top)\n loglist.append('%s, %s' % ('remove', fpath))\n remove_static_folder_from_html_done = 1\nif remove_static_folder_from_html_done:\n result['MILESTONES'].append({'remove_static_folder_from_html_done':\n remove_static_folder_from_html_done})\ntct.save_the_result(result, resultfile, params, facts, milestones, exitcode,\n CONTINUE, reason)\nsys.exit(exitcode)\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef lookup(D, *keys, **kwdargs):\n result = tct.deepget(D, *keys, **kwdargs)\n loglist.append((keys, result))\n return result\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,644 |
c72af4f3da3e9d1c2fb9f54ce442bb33b67f0d5c
|
"""
Exercise2 :
Use this skeleton and the webcam_getoneframe.py as a sample to write your own that streams continuously
from webcam and plots intensity of image across time
Hints :
1. Consolidate the functions - acquiring image, converting to rgb and calculating intensity,
into a single generator function.
2. Acquire the results of this generator function within an infinite loop.
3. Quit the loop with key press (example if statement below)
4. Yield can spit out two results. You can obtain that by
for i in g:
print(i[0]) #First output
print(i[1]) #Second output
"""
import cv2
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
plt.style.use('dark_background') # Dark background for a prettier plot
def display_images():
"""
Main function to call
This function should obtain results from generators and plot image and image intensity
Create a for loop to iterate the generator functions
"""
vc = cv2.VideoCapture(0) # Open webcam
figure, ax = plt.subplots(1, 2, figsize=(10, 5)) # Intiialise plot
count = 0 # Counter for number of aquired frames
intensity = [] # Append intensity across time
# For loop over generator here
intensity.append(imageintensity)
plot_image_and_brightness() # Call plot function
count += 1
# This triggers exit sequences when user presses q
if cv2.waitKey(1) & 0xFF == ord('q'):
# Clean up here
plt.close('all') # close plots
generator.close() # Use generator exit for clean up,
break # break loop
def plot_image_and_brightness(axis, image, imageintensity, framecount):
"""
This function plots image and intensity of image through time
:param axis: figure axis for plotting
image: rgb image
imageintensity: intensity of image
framecount: present frame number
"""
# Plot RGB Image
axis[0].imshow(image)
axis[0].axis('off')
axis[0].set_title(f'Frame Number {framecount}')
# Plot intensity
axis[1].plot(imageintensity, '.-')
axis[1].set_ylabel('Average Intensity')
# Stuff to show and stream plot
plt.show(block=False)
plt.pause(0.001)
def stream_frames(video_capture):
"""
Use an infinite loop and write a generator function that should acquire image, convert to rgb,
get mean intensity and yield necessary results
:param video_capture: the video capture object from opencv
:yield RGB_image
Image Intensity
"""
display_images()
|
[
"\"\"\"\nExercise2 :\nUse this skeleton and the webcam_getoneframe.py as a sample to write your own that streams continuously\nfrom webcam and plots intensity of image across time\nHints :\n1. Consolidate the functions - acquiring image, converting to rgb and calculating intensity,\ninto a single generator function.\n2. Acquire the results of this generator function within an infinite loop.\n3. Quit the loop with key press (example if statement below)\n4. Yield can spit out two results. You can obtain that by\nfor i in g:\n print(i[0]) #First output\n print(i[1]) #Second output\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport matplotlib\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nplt.style.use('dark_background') # Dark background for a prettier plot\n\n\ndef display_images():\n \"\"\"\n Main function to call\n This function should obtain results from generators and plot image and image intensity\n Create a for loop to iterate the generator functions\n \"\"\"\n vc = cv2.VideoCapture(0) # Open webcam\n figure, ax = plt.subplots(1, 2, figsize=(10, 5)) # Intiialise plot\n\n count = 0 # Counter for number of aquired frames\n intensity = [] # Append intensity across time\n\n # For loop over generator here\n intensity.append(imageintensity)\n plot_image_and_brightness() # Call plot function\n count += 1\n\n # This triggers exit sequences when user presses q\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # Clean up here\n plt.close('all') # close plots\n generator.close() # Use generator exit for clean up,\n break # break loop\n\n\ndef plot_image_and_brightness(axis, image, imageintensity, framecount):\n \"\"\"\n This function plots image and intensity of image through time\n :param axis: figure axis for plotting\n image: rgb image\n imageintensity: intensity of image\n framecount: present frame number\n \"\"\"\n\n # Plot RGB Image\n axis[0].imshow(image)\n axis[0].axis('off')\n axis[0].set_title(f'Frame Number {framecount}')\n\n # Plot intensity\n axis[1].plot(imageintensity, '.-')\n axis[1].set_ylabel('Average Intensity')\n\n # Stuff to show and stream plot\n plt.show(block=False)\n plt.pause(0.001)\n\n\ndef stream_frames(video_capture):\n \"\"\"\n Use an infinite loop and write a generator function that should acquire image, convert to rgb,\n get mean intensity and yield necessary results\n :param video_capture: the video capture object from opencv\n :yield RGB_image\n Image Intensity\n \"\"\"\n\n\ndisplay_images()\n",
"<docstring token>\nimport cv2\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nplt.style.use('dark_background')\n\n\ndef display_images():\n \"\"\"\n Main function to call\n This function should obtain results from generators and plot image and image intensity\n Create a for loop to iterate the generator functions\n \"\"\"\n vc = cv2.VideoCapture(0)\n figure, ax = plt.subplots(1, 2, figsize=(10, 5))\n count = 0\n intensity = []\n intensity.append(imageintensity)\n plot_image_and_brightness()\n count += 1\n if cv2.waitKey(1) & 255 == ord('q'):\n plt.close('all')\n generator.close()\n break\n\n\ndef plot_image_and_brightness(axis, image, imageintensity, framecount):\n \"\"\"\n This function plots image and intensity of image through time\n :param axis: figure axis for plotting\n image: rgb image\n imageintensity: intensity of image\n framecount: present frame number\n \"\"\"\n axis[0].imshow(image)\n axis[0].axis('off')\n axis[0].set_title(f'Frame Number {framecount}')\n axis[1].plot(imageintensity, '.-')\n axis[1].set_ylabel('Average Intensity')\n plt.show(block=False)\n plt.pause(0.001)\n\n\ndef stream_frames(video_capture):\n \"\"\"\n Use an infinite loop and write a generator function that should acquire image, convert to rgb,\n get mean intensity and yield necessary results\n :param video_capture: the video capture object from opencv\n :yield RGB_image\n Image Intensity\n \"\"\"\n\n\ndisplay_images()\n",
"<docstring token>\n<import token>\nmatplotlib.use('TkAgg')\n<import token>\nplt.style.use('dark_background')\n\n\ndef display_images():\n \"\"\"\n Main function to call\n This function should obtain results from generators and plot image and image intensity\n Create a for loop to iterate the generator functions\n \"\"\"\n vc = cv2.VideoCapture(0)\n figure, ax = plt.subplots(1, 2, figsize=(10, 5))\n count = 0\n intensity = []\n intensity.append(imageintensity)\n plot_image_and_brightness()\n count += 1\n if cv2.waitKey(1) & 255 == ord('q'):\n plt.close('all')\n generator.close()\n break\n\n\ndef plot_image_and_brightness(axis, image, imageintensity, framecount):\n \"\"\"\n This function plots image and intensity of image through time\n :param axis: figure axis for plotting\n image: rgb image\n imageintensity: intensity of image\n framecount: present frame number\n \"\"\"\n axis[0].imshow(image)\n axis[0].axis('off')\n axis[0].set_title(f'Frame Number {framecount}')\n axis[1].plot(imageintensity, '.-')\n axis[1].set_ylabel('Average Intensity')\n plt.show(block=False)\n plt.pause(0.001)\n\n\ndef stream_frames(video_capture):\n \"\"\"\n Use an infinite loop and write a generator function that should acquire image, convert to rgb,\n get mean intensity and yield necessary results\n :param video_capture: the video capture object from opencv\n :yield RGB_image\n Image Intensity\n \"\"\"\n\n\ndisplay_images()\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef display_images():\n \"\"\"\n Main function to call\n This function should obtain results from generators and plot image and image intensity\n Create a for loop to iterate the generator functions\n \"\"\"\n vc = cv2.VideoCapture(0)\n figure, ax = plt.subplots(1, 2, figsize=(10, 5))\n count = 0\n intensity = []\n intensity.append(imageintensity)\n plot_image_and_brightness()\n count += 1\n if cv2.waitKey(1) & 255 == ord('q'):\n plt.close('all')\n generator.close()\n break\n\n\ndef plot_image_and_brightness(axis, image, imageintensity, framecount):\n \"\"\"\n This function plots image and intensity of image through time\n :param axis: figure axis for plotting\n image: rgb image\n imageintensity: intensity of image\n framecount: present frame number\n \"\"\"\n axis[0].imshow(image)\n axis[0].axis('off')\n axis[0].set_title(f'Frame Number {framecount}')\n axis[1].plot(imageintensity, '.-')\n axis[1].set_ylabel('Average Intensity')\n plt.show(block=False)\n plt.pause(0.001)\n\n\ndef stream_frames(video_capture):\n \"\"\"\n Use an infinite loop and write a generator function that should acquire image, convert to rgb,\n get mean intensity and yield necessary results\n :param video_capture: the video capture object from opencv\n :yield RGB_image\n Image Intensity\n \"\"\"\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef display_images():\n \"\"\"\n Main function to call\n This function should obtain results from generators and plot image and image intensity\n Create a for loop to iterate the generator functions\n \"\"\"\n vc = cv2.VideoCapture(0)\n figure, ax = plt.subplots(1, 2, figsize=(10, 5))\n count = 0\n intensity = []\n intensity.append(imageintensity)\n plot_image_and_brightness()\n count += 1\n if cv2.waitKey(1) & 255 == ord('q'):\n plt.close('all')\n generator.close()\n break\n\n\n<function token>\n\n\ndef stream_frames(video_capture):\n \"\"\"\n Use an infinite loop and write a generator function that should acquire image, convert to rgb,\n get mean intensity and yield necessary results\n :param video_capture: the video capture object from opencv\n :yield RGB_image\n Image Intensity\n \"\"\"\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef display_images():\n \"\"\"\n Main function to call\n This function should obtain results from generators and plot image and image intensity\n Create a for loop to iterate the generator functions\n \"\"\"\n vc = cv2.VideoCapture(0)\n figure, ax = plt.subplots(1, 2, figsize=(10, 5))\n count = 0\n intensity = []\n intensity.append(imageintensity)\n plot_image_and_brightness()\n count += 1\n if cv2.waitKey(1) & 255 == ord('q'):\n plt.close('all')\n generator.close()\n break\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,645 |
06cf93c1b5f915da604e22a7c545263b38d0118e
|
from LPR_ADD_GAME import lpr_add_game
import csv
import os
from LPR_ADD_PLAYER import lpr_add_player
from LPR_PLAYER_CLASS import Player
from LPR_VIEW_GAMES import lpr_view_game
print('WELCOME TO LIVER POOL RUMMY SCORE KEEPER, TO EXIT TYPE -Q at any point')
user_selection = ''
games_list = []
if os.path.isfile('GAME_SCORES.csv')== True:
with open('GAME_SCORES.csv') as file:
for row in csv.DictReader(file, skipinitialspace=True):
game_dict = {}
for k, v in row.items():
if v != '':
game_dict[k] = v
games_list.append(game_dict)
for game in games_list:
for key in game:
if key != 'winner':
csv_player_scores = [int(x) for x in game[key].replace('[', '').replace(']', '').split(',')]
Player.get_player(key).update_score(csv_player_scores, game['winner'])
else:
print('====================================================')
print('COULD NOT LOAD PLAYER GAMES LIST FILE GAME_SCORE.CSV')
print('====================================================')
# Start up, load scores into classes, load games into list
while user_selection != '-Q':
print('MAIN LPR MENU: ')
print('ADD NEW GAME -N')
print('ADD PLAYER -P')
print('EXIT -Q')
print('PLAYER SCORE -S')
print('GAME SCORES -G')
# Adding a new game.
user_selection = input('WHAT WOULD YOU LIKE TO DO : ').upper()
if user_selection == '-N':
new_game_dic = lpr_add_game()
if new_game_dic != 0:
games_list.append(new_game_dic)
for key in new_game_dic.keys():
if key != 'winner':
Player._players[key].update_score(new_game_dic[key], new_game_dic['winner'])
elif user_selection == '-S':
print('WHAT PLAYER WOULD YOU LIKE TO PRINT: ')
user_selection_test = True
while user_selection_test:
user_score_selection = input().upper()
if user_score_selection in Player._players:
user_selection_test = False
Player._players[user_score_selection].display_player_score()
elif user_score_selection == '-Q':
user_selection_test = False
else:
print('USER NOT IN LIST, TRY AGAIN: ')
# Adding a new player
elif user_selection == '-P':
lpr_add_player()
# Viewing games in the games list
elif user_selection == '-G':
lpr_view_game(games_list)
print('EXITING, PLEASE DO NOT SHUT DOWN.')
# Quitting , save to txt file.
with open('GAME_SCORES.csv', 'w') as csvfile:
fields = ['winner']
for key in Player._players.keys():
fields.append(key)
writer = csv.DictWriter(csvfile, fieldnames=fields)
writer.writeheader()
for game_index in range(len(games_list)):
writer.writerow(games_list[game_index])
|
[
"from LPR_ADD_GAME import lpr_add_game\nimport csv\nimport os\nfrom LPR_ADD_PLAYER import lpr_add_player\nfrom LPR_PLAYER_CLASS import Player\nfrom LPR_VIEW_GAMES import lpr_view_game\nprint('WELCOME TO LIVER POOL RUMMY SCORE KEEPER, TO EXIT TYPE -Q at any point')\n\n\nuser_selection = ''\ngames_list = []\nif os.path.isfile('GAME_SCORES.csv')== True:\n with open('GAME_SCORES.csv') as file:\n for row in csv.DictReader(file, skipinitialspace=True):\n game_dict = {}\n for k, v in row.items():\n if v != '':\n game_dict[k] = v\n games_list.append(game_dict)\n for game in games_list:\n for key in game:\n if key != 'winner':\n csv_player_scores = [int(x) for x in game[key].replace('[', '').replace(']', '').split(',')]\n Player.get_player(key).update_score(csv_player_scores, game['winner'])\n\nelse:\n print('====================================================')\n print('COULD NOT LOAD PLAYER GAMES LIST FILE GAME_SCORE.CSV')\n print('====================================================')\n# Start up, load scores into classes, load games into list\n\nwhile user_selection != '-Q':\n print('MAIN LPR MENU: ')\n print('ADD NEW GAME -N')\n print('ADD PLAYER -P')\n print('EXIT -Q')\n print('PLAYER SCORE -S')\n print('GAME SCORES -G')\n\n # Adding a new game.\n user_selection = input('WHAT WOULD YOU LIKE TO DO : ').upper()\n if user_selection == '-N':\n new_game_dic = lpr_add_game()\n if new_game_dic != 0:\n games_list.append(new_game_dic)\n for key in new_game_dic.keys():\n if key != 'winner':\n Player._players[key].update_score(new_game_dic[key], new_game_dic['winner'])\n\n elif user_selection == '-S':\n print('WHAT PLAYER WOULD YOU LIKE TO PRINT: ')\n user_selection_test = True\n while user_selection_test:\n user_score_selection = input().upper()\n if user_score_selection in Player._players:\n user_selection_test = False\n Player._players[user_score_selection].display_player_score()\n elif user_score_selection == '-Q':\n user_selection_test = False\n else:\n print('USER NOT IN LIST, TRY AGAIN: ')\n # Adding a new player\n elif user_selection == '-P':\n lpr_add_player()\n\n # Viewing games in the games list\n elif user_selection == '-G':\n lpr_view_game(games_list)\n\nprint('EXITING, PLEASE DO NOT SHUT DOWN.')\n# Quitting , save to txt file.\nwith open('GAME_SCORES.csv', 'w') as csvfile:\n fields = ['winner']\n for key in Player._players.keys():\n fields.append(key)\n\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writeheader()\n for game_index in range(len(games_list)):\n writer.writerow(games_list[game_index])\n",
"from LPR_ADD_GAME import lpr_add_game\nimport csv\nimport os\nfrom LPR_ADD_PLAYER import lpr_add_player\nfrom LPR_PLAYER_CLASS import Player\nfrom LPR_VIEW_GAMES import lpr_view_game\nprint('WELCOME TO LIVER POOL RUMMY SCORE KEEPER, TO EXIT TYPE -Q at any point')\nuser_selection = ''\ngames_list = []\nif os.path.isfile('GAME_SCORES.csv') == True:\n with open('GAME_SCORES.csv') as file:\n for row in csv.DictReader(file, skipinitialspace=True):\n game_dict = {}\n for k, v in row.items():\n if v != '':\n game_dict[k] = v\n games_list.append(game_dict)\n for game in games_list:\n for key in game:\n if key != 'winner':\n csv_player_scores = [int(x) for x in game[key].replace('[',\n '').replace(']', '').split(',')]\n Player.get_player(key).update_score(csv_player_scores, game\n ['winner'])\nelse:\n print('====================================================')\n print('COULD NOT LOAD PLAYER GAMES LIST FILE GAME_SCORE.CSV')\n print('====================================================')\nwhile user_selection != '-Q':\n print('MAIN LPR MENU: ')\n print('ADD NEW GAME -N')\n print('ADD PLAYER -P')\n print('EXIT -Q')\n print('PLAYER SCORE -S')\n print('GAME SCORES -G')\n user_selection = input('WHAT WOULD YOU LIKE TO DO : ').upper()\n if user_selection == '-N':\n new_game_dic = lpr_add_game()\n if new_game_dic != 0:\n games_list.append(new_game_dic)\n for key in new_game_dic.keys():\n if key != 'winner':\n Player._players[key].update_score(new_game_dic[key],\n new_game_dic['winner'])\n elif user_selection == '-S':\n print('WHAT PLAYER WOULD YOU LIKE TO PRINT: ')\n user_selection_test = True\n while user_selection_test:\n user_score_selection = input().upper()\n if user_score_selection in Player._players:\n user_selection_test = False\n Player._players[user_score_selection].display_player_score()\n elif user_score_selection == '-Q':\n user_selection_test = False\n else:\n print('USER NOT IN LIST, TRY AGAIN: ')\n elif user_selection == '-P':\n lpr_add_player()\n elif user_selection == '-G':\n lpr_view_game(games_list)\nprint('EXITING, PLEASE DO NOT SHUT DOWN.')\nwith open('GAME_SCORES.csv', 'w') as csvfile:\n fields = ['winner']\n for key in Player._players.keys():\n fields.append(key)\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writeheader()\n for game_index in range(len(games_list)):\n writer.writerow(games_list[game_index])\n",
"<import token>\nprint('WELCOME TO LIVER POOL RUMMY SCORE KEEPER, TO EXIT TYPE -Q at any point')\nuser_selection = ''\ngames_list = []\nif os.path.isfile('GAME_SCORES.csv') == True:\n with open('GAME_SCORES.csv') as file:\n for row in csv.DictReader(file, skipinitialspace=True):\n game_dict = {}\n for k, v in row.items():\n if v != '':\n game_dict[k] = v\n games_list.append(game_dict)\n for game in games_list:\n for key in game:\n if key != 'winner':\n csv_player_scores = [int(x) for x in game[key].replace('[',\n '').replace(']', '').split(',')]\n Player.get_player(key).update_score(csv_player_scores, game\n ['winner'])\nelse:\n print('====================================================')\n print('COULD NOT LOAD PLAYER GAMES LIST FILE GAME_SCORE.CSV')\n print('====================================================')\nwhile user_selection != '-Q':\n print('MAIN LPR MENU: ')\n print('ADD NEW GAME -N')\n print('ADD PLAYER -P')\n print('EXIT -Q')\n print('PLAYER SCORE -S')\n print('GAME SCORES -G')\n user_selection = input('WHAT WOULD YOU LIKE TO DO : ').upper()\n if user_selection == '-N':\n new_game_dic = lpr_add_game()\n if new_game_dic != 0:\n games_list.append(new_game_dic)\n for key in new_game_dic.keys():\n if key != 'winner':\n Player._players[key].update_score(new_game_dic[key],\n new_game_dic['winner'])\n elif user_selection == '-S':\n print('WHAT PLAYER WOULD YOU LIKE TO PRINT: ')\n user_selection_test = True\n while user_selection_test:\n user_score_selection = input().upper()\n if user_score_selection in Player._players:\n user_selection_test = False\n Player._players[user_score_selection].display_player_score()\n elif user_score_selection == '-Q':\n user_selection_test = False\n else:\n print('USER NOT IN LIST, TRY AGAIN: ')\n elif user_selection == '-P':\n lpr_add_player()\n elif user_selection == '-G':\n lpr_view_game(games_list)\nprint('EXITING, PLEASE DO NOT SHUT DOWN.')\nwith open('GAME_SCORES.csv', 'w') as csvfile:\n fields = ['winner']\n for key in Player._players.keys():\n fields.append(key)\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writeheader()\n for game_index in range(len(games_list)):\n writer.writerow(games_list[game_index])\n",
"<import token>\nprint('WELCOME TO LIVER POOL RUMMY SCORE KEEPER, TO EXIT TYPE -Q at any point')\n<assignment token>\nif os.path.isfile('GAME_SCORES.csv') == True:\n with open('GAME_SCORES.csv') as file:\n for row in csv.DictReader(file, skipinitialspace=True):\n game_dict = {}\n for k, v in row.items():\n if v != '':\n game_dict[k] = v\n games_list.append(game_dict)\n for game in games_list:\n for key in game:\n if key != 'winner':\n csv_player_scores = [int(x) for x in game[key].replace('[',\n '').replace(']', '').split(',')]\n Player.get_player(key).update_score(csv_player_scores, game\n ['winner'])\nelse:\n print('====================================================')\n print('COULD NOT LOAD PLAYER GAMES LIST FILE GAME_SCORE.CSV')\n print('====================================================')\nwhile user_selection != '-Q':\n print('MAIN LPR MENU: ')\n print('ADD NEW GAME -N')\n print('ADD PLAYER -P')\n print('EXIT -Q')\n print('PLAYER SCORE -S')\n print('GAME SCORES -G')\n user_selection = input('WHAT WOULD YOU LIKE TO DO : ').upper()\n if user_selection == '-N':\n new_game_dic = lpr_add_game()\n if new_game_dic != 0:\n games_list.append(new_game_dic)\n for key in new_game_dic.keys():\n if key != 'winner':\n Player._players[key].update_score(new_game_dic[key],\n new_game_dic['winner'])\n elif user_selection == '-S':\n print('WHAT PLAYER WOULD YOU LIKE TO PRINT: ')\n user_selection_test = True\n while user_selection_test:\n user_score_selection = input().upper()\n if user_score_selection in Player._players:\n user_selection_test = False\n Player._players[user_score_selection].display_player_score()\n elif user_score_selection == '-Q':\n user_selection_test = False\n else:\n print('USER NOT IN LIST, TRY AGAIN: ')\n elif user_selection == '-P':\n lpr_add_player()\n elif user_selection == '-G':\n lpr_view_game(games_list)\nprint('EXITING, PLEASE DO NOT SHUT DOWN.')\nwith open('GAME_SCORES.csv', 'w') as csvfile:\n fields = ['winner']\n for key in Player._players.keys():\n fields.append(key)\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writeheader()\n for game_index in range(len(games_list)):\n writer.writerow(games_list[game_index])\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,646 |
d85e5f981f8a0d0992c555fe00bd51aafeb9e8f7
|
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Hexadecimal
x = 0x0a
y = 0x02
z = x & y
# Printing in HEX
print(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')
# Printing in Binary
print(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')
|
[
"#!/usr/bin/env python3\n# Copyright 2009-2017 BHG http://bw.org/\n\n# Hexadecimal \nx = 0x0a\ny = 0x02\nz = x & y\n\n# Printing in HEX\nprint(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')\n\n# Printing in Binary\nprint(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')\n",
"x = 10\ny = 2\nz = x & y\nprint(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')\nprint(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')\n",
"<assignment token>\nprint(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')\nprint(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')\n",
"<assignment token>\n<code token>\n"
] | false |
99,647 |
1f5f96a7e38daee8780c2cc346a3d9856caa619e
|
from collections import Counter
import pandas as pd
from pprint import pprint
df = pd.read_csv('yv-data-4.csv', encoding='utf-8')
mark = df['MARK'].values.tolist()
mudel = df['MUDEL'].values.tolist()
valjalaskeaasta = df['VÄLJALASKEAASTA'].values.tolist()
koduva_yv_protsent = df['KORDUVA ÜV PROTSENT'].values.tolist()
soidukite_vanus = df['SOIDUKITE VANUS'].values.tolist()
soidukite_arv = df['SOIDUKITE ARV'].values.tolist()
carmakers = []
car_makers = ['MERCEDES-BENZ', 'BMW', 'FORD', 'HONDA', 'VOLKSWAGEN', 'VOLVO', 'SUZUKI', 'TOYOTA', 'OPEL', 'CHEVROLET',
'AUDI', 'NISSAN', 'RENAULT', 'MITSUBISHI', 'FIAT', 'PEUGEOT', 'CITROEN', 'MAZDA', 'HYUNDAI', 'DODGE',
'PORSCHE', 'CHRYSLER', 'KIA', 'LEXUS', 'CADILLACK', 'JEEP', 'SEAT', 'LAND ROVER', 'SKODA', 'SUBARU',
'JAGUAR', 'SAAB', 'ALFA ROMEO', 'PONTIAC', 'LINCOLN', 'ISUZU', 'BUICK', 'DAEWOO', 'INFINITI', 'LANCIA',
'DACIA', 'SMART', 'CADILLAC']
for carmaker in Counter(mark).most_common(100):
carmakers.append(carmaker[0])
dict_of_cars = {}
for i in range(len(mark) - 1):
if soidukite_arv[i] < 50:
continue
if mark[i] not in car_makers:
continue
#if koduva_yv_protsent[i] > float(5):
# continue
if valjalaskeaasta[i] > int(2008):
continue
#if mark[i] not in dict_of_cars.keys():
# dict_of_cars[mark[i]] = {}
# if mudel[i] not in dict_of_cars[mark[i]][mudel[i]].keys():
# dict_of_cars[mark[i]][mudel[i]] = {}
dict_of_cars[mark[i] + ' ' + mudel[i] + ' ' + str(valjalaskeaasta[i])] = {
'valjalaskeaasta': valjalaskeaasta[i],
'koduva_yv_protsent': koduva_yv_protsent[i],
'soidukite_arv': soidukite_arv[i]
}
# dict_of_cars[mark[i]]['valjalaskeaasta'] = valjalaskeaasta[i]
# dict_of_cars[mark[i]]['koduva_yv_protsent'] = koduva_yv_protsent[i]
# dict_of_cars[mark[i]]['soidukite_arv'] = soidukite_arv[i]
pprint(sorted(dict_of_cars.items(), key=lambda x: x[1]['koduva_yv_protsent']))
# I want to connenct car models with lines so I can see, which is consistently the best.
# I have to recognize which models are acually the same.
#def does_my_dataset_already_contain_this_model(car_mark, car_model, car)
|
[
"from collections import Counter\nimport pandas as pd\nfrom pprint import pprint\n\ndf = pd.read_csv('yv-data-4.csv', encoding='utf-8')\n\n\nmark = df['MARK'].values.tolist()\nmudel = df['MUDEL'].values.tolist()\nvaljalaskeaasta = df['VÄLJALASKEAASTA'].values.tolist()\nkoduva_yv_protsent = df['KORDUVA ÜV PROTSENT'].values.tolist()\nsoidukite_vanus = df['SOIDUKITE VANUS'].values.tolist()\nsoidukite_arv = df['SOIDUKITE ARV'].values.tolist()\n\ncarmakers = []\n\ncar_makers = ['MERCEDES-BENZ', 'BMW', 'FORD', 'HONDA', 'VOLKSWAGEN', 'VOLVO', 'SUZUKI', 'TOYOTA', 'OPEL', 'CHEVROLET',\n 'AUDI', 'NISSAN', 'RENAULT', 'MITSUBISHI', 'FIAT', 'PEUGEOT', 'CITROEN', 'MAZDA', 'HYUNDAI', 'DODGE',\n 'PORSCHE', 'CHRYSLER', 'KIA', 'LEXUS', 'CADILLACK', 'JEEP', 'SEAT', 'LAND ROVER', 'SKODA', 'SUBARU',\n 'JAGUAR', 'SAAB', 'ALFA ROMEO', 'PONTIAC', 'LINCOLN', 'ISUZU', 'BUICK', 'DAEWOO', 'INFINITI', 'LANCIA',\n 'DACIA', 'SMART', 'CADILLAC']\n\nfor carmaker in Counter(mark).most_common(100):\n carmakers.append(carmaker[0])\n\ndict_of_cars = {}\n\nfor i in range(len(mark) - 1):\n if soidukite_arv[i] < 50:\n continue\n\n if mark[i] not in car_makers:\n continue\n\n #if koduva_yv_protsent[i] > float(5):\n # continue\n\n if valjalaskeaasta[i] > int(2008):\n continue\n\n #if mark[i] not in dict_of_cars.keys():\n # dict_of_cars[mark[i]] = {}\n\n # if mudel[i] not in dict_of_cars[mark[i]][mudel[i]].keys():\n # dict_of_cars[mark[i]][mudel[i]] = {}\n\n dict_of_cars[mark[i] + ' ' + mudel[i] + ' ' + str(valjalaskeaasta[i])] = {\n 'valjalaskeaasta': valjalaskeaasta[i],\n 'koduva_yv_protsent': koduva_yv_protsent[i],\n 'soidukite_arv': soidukite_arv[i]\n }\n\n # dict_of_cars[mark[i]]['valjalaskeaasta'] = valjalaskeaasta[i]\n # dict_of_cars[mark[i]]['koduva_yv_protsent'] = koduva_yv_protsent[i]\n # dict_of_cars[mark[i]]['soidukite_arv'] = soidukite_arv[i]\n\n\npprint(sorted(dict_of_cars.items(), key=lambda x: x[1]['koduva_yv_protsent']))\n\n\n# I want to connenct car models with lines so I can see, which is consistently the best.\n# I have to recognize which models are acually the same.\n\n#def does_my_dataset_already_contain_this_model(car_mark, car_model, car)",
"from collections import Counter\nimport pandas as pd\nfrom pprint import pprint\ndf = pd.read_csv('yv-data-4.csv', encoding='utf-8')\nmark = df['MARK'].values.tolist()\nmudel = df['MUDEL'].values.tolist()\nvaljalaskeaasta = df['VÄLJALASKEAASTA'].values.tolist()\nkoduva_yv_protsent = df['KORDUVA ÜV PROTSENT'].values.tolist()\nsoidukite_vanus = df['SOIDUKITE VANUS'].values.tolist()\nsoidukite_arv = df['SOIDUKITE ARV'].values.tolist()\ncarmakers = []\ncar_makers = ['MERCEDES-BENZ', 'BMW', 'FORD', 'HONDA', 'VOLKSWAGEN',\n 'VOLVO', 'SUZUKI', 'TOYOTA', 'OPEL', 'CHEVROLET', 'AUDI', 'NISSAN',\n 'RENAULT', 'MITSUBISHI', 'FIAT', 'PEUGEOT', 'CITROEN', 'MAZDA',\n 'HYUNDAI', 'DODGE', 'PORSCHE', 'CHRYSLER', 'KIA', 'LEXUS', 'CADILLACK',\n 'JEEP', 'SEAT', 'LAND ROVER', 'SKODA', 'SUBARU', 'JAGUAR', 'SAAB',\n 'ALFA ROMEO', 'PONTIAC', 'LINCOLN', 'ISUZU', 'BUICK', 'DAEWOO',\n 'INFINITI', 'LANCIA', 'DACIA', 'SMART', 'CADILLAC']\nfor carmaker in Counter(mark).most_common(100):\n carmakers.append(carmaker[0])\ndict_of_cars = {}\nfor i in range(len(mark) - 1):\n if soidukite_arv[i] < 50:\n continue\n if mark[i] not in car_makers:\n continue\n if valjalaskeaasta[i] > int(2008):\n continue\n dict_of_cars[mark[i] + ' ' + mudel[i] + ' ' + str(valjalaskeaasta[i])] = {\n 'valjalaskeaasta': valjalaskeaasta[i], 'koduva_yv_protsent':\n koduva_yv_protsent[i], 'soidukite_arv': soidukite_arv[i]}\npprint(sorted(dict_of_cars.items(), key=lambda x: x[1]['koduva_yv_protsent']))\n",
"<import token>\ndf = pd.read_csv('yv-data-4.csv', encoding='utf-8')\nmark = df['MARK'].values.tolist()\nmudel = df['MUDEL'].values.tolist()\nvaljalaskeaasta = df['VÄLJALASKEAASTA'].values.tolist()\nkoduva_yv_protsent = df['KORDUVA ÜV PROTSENT'].values.tolist()\nsoidukite_vanus = df['SOIDUKITE VANUS'].values.tolist()\nsoidukite_arv = df['SOIDUKITE ARV'].values.tolist()\ncarmakers = []\ncar_makers = ['MERCEDES-BENZ', 'BMW', 'FORD', 'HONDA', 'VOLKSWAGEN',\n 'VOLVO', 'SUZUKI', 'TOYOTA', 'OPEL', 'CHEVROLET', 'AUDI', 'NISSAN',\n 'RENAULT', 'MITSUBISHI', 'FIAT', 'PEUGEOT', 'CITROEN', 'MAZDA',\n 'HYUNDAI', 'DODGE', 'PORSCHE', 'CHRYSLER', 'KIA', 'LEXUS', 'CADILLACK',\n 'JEEP', 'SEAT', 'LAND ROVER', 'SKODA', 'SUBARU', 'JAGUAR', 'SAAB',\n 'ALFA ROMEO', 'PONTIAC', 'LINCOLN', 'ISUZU', 'BUICK', 'DAEWOO',\n 'INFINITI', 'LANCIA', 'DACIA', 'SMART', 'CADILLAC']\nfor carmaker in Counter(mark).most_common(100):\n carmakers.append(carmaker[0])\ndict_of_cars = {}\nfor i in range(len(mark) - 1):\n if soidukite_arv[i] < 50:\n continue\n if mark[i] not in car_makers:\n continue\n if valjalaskeaasta[i] > int(2008):\n continue\n dict_of_cars[mark[i] + ' ' + mudel[i] + ' ' + str(valjalaskeaasta[i])] = {\n 'valjalaskeaasta': valjalaskeaasta[i], 'koduva_yv_protsent':\n koduva_yv_protsent[i], 'soidukite_arv': soidukite_arv[i]}\npprint(sorted(dict_of_cars.items(), key=lambda x: x[1]['koduva_yv_protsent']))\n",
"<import token>\n<assignment token>\nfor carmaker in Counter(mark).most_common(100):\n carmakers.append(carmaker[0])\n<assignment token>\nfor i in range(len(mark) - 1):\n if soidukite_arv[i] < 50:\n continue\n if mark[i] not in car_makers:\n continue\n if valjalaskeaasta[i] > int(2008):\n continue\n dict_of_cars[mark[i] + ' ' + mudel[i] + ' ' + str(valjalaskeaasta[i])] = {\n 'valjalaskeaasta': valjalaskeaasta[i], 'koduva_yv_protsent':\n koduva_yv_protsent[i], 'soidukite_arv': soidukite_arv[i]}\npprint(sorted(dict_of_cars.items(), key=lambda x: x[1]['koduva_yv_protsent']))\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,648 |
365003933ea72eda11b282618296bf9db67b088e
|
from openerp.osv import osv, fields
class openacademy_course (osv.Model):
_name= 'openacademy.course'
_columns= {
'name': fields.char('Name', size=32, required= True),
'description': fields.text('Description'),
'responsible_id': fields.many2one('res.users', string='Responsible'),
'session_ids':fields.one2many('openacademy.session', 'course_id', string='Sessions'),
}
class openacademy_session (osv.Model):
_name= 'openacademy.session'
_columns= {
'name': fields.char('Name', size=32, required= True),
'duration': fields.float('Duration'),
'seats': fields.integer('Seats'),
'course_id': fields.many2one('openacademy.course', string='Course'),
'instructor_id': fields.many2one('res.partner', string='Instructor'),
}
|
[
"from openerp.osv import osv, fields\n\nclass openacademy_course (osv.Model):\n _name= 'openacademy.course'\n _columns= {\n 'name': fields.char('Name', size=32, required= True),\n 'description': fields.text('Description'),\n 'responsible_id': fields.many2one('res.users', string='Responsible'),\n 'session_ids':fields.one2many('openacademy.session', 'course_id', string='Sessions'),\n \n }\n \n \nclass openacademy_session (osv.Model):\n _name= 'openacademy.session'\n _columns= {\n 'name': fields.char('Name', size=32, required= True),\n 'duration': fields.float('Duration'),\n 'seats': fields.integer('Seats'),\n 'course_id': fields.many2one('openacademy.course', string='Course'),\n 'instructor_id': fields.many2one('res.partner', string='Instructor'),\n }\n",
"from openerp.osv import osv, fields\n\n\nclass openacademy_course(osv.Model):\n _name = 'openacademy.course'\n _columns = {'name': fields.char('Name', size=32, required=True),\n 'description': fields.text('Description'), 'responsible_id': fields\n .many2one('res.users', string='Responsible'), 'session_ids': fields\n .one2many('openacademy.session', 'course_id', string='Sessions')}\n\n\nclass openacademy_session(osv.Model):\n _name = 'openacademy.session'\n _columns = {'name': fields.char('Name', size=32, required=True),\n 'duration': fields.float('Duration'), 'seats': fields.integer(\n 'Seats'), 'course_id': fields.many2one('openacademy.course', string\n ='Course'), 'instructor_id': fields.many2one('res.partner', string=\n 'Instructor')}\n",
"<import token>\n\n\nclass openacademy_course(osv.Model):\n _name = 'openacademy.course'\n _columns = {'name': fields.char('Name', size=32, required=True),\n 'description': fields.text('Description'), 'responsible_id': fields\n .many2one('res.users', string='Responsible'), 'session_ids': fields\n .one2many('openacademy.session', 'course_id', string='Sessions')}\n\n\nclass openacademy_session(osv.Model):\n _name = 'openacademy.session'\n _columns = {'name': fields.char('Name', size=32, required=True),\n 'duration': fields.float('Duration'), 'seats': fields.integer(\n 'Seats'), 'course_id': fields.many2one('openacademy.course', string\n ='Course'), 'instructor_id': fields.many2one('res.partner', string=\n 'Instructor')}\n",
"<import token>\n\n\nclass openacademy_course(osv.Model):\n <assignment token>\n <assignment token>\n\n\nclass openacademy_session(osv.Model):\n _name = 'openacademy.session'\n _columns = {'name': fields.char('Name', size=32, required=True),\n 'duration': fields.float('Duration'), 'seats': fields.integer(\n 'Seats'), 'course_id': fields.many2one('openacademy.course', string\n ='Course'), 'instructor_id': fields.many2one('res.partner', string=\n 'Instructor')}\n",
"<import token>\n<class token>\n\n\nclass openacademy_session(osv.Model):\n _name = 'openacademy.session'\n _columns = {'name': fields.char('Name', size=32, required=True),\n 'duration': fields.float('Duration'), 'seats': fields.integer(\n 'Seats'), 'course_id': fields.many2one('openacademy.course', string\n ='Course'), 'instructor_id': fields.many2one('res.partner', string=\n 'Instructor')}\n",
"<import token>\n<class token>\n\n\nclass openacademy_session(osv.Model):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
99,649 |
65d51a6e2fd6f9bb6dc5ad458dcea6ec9bb9f29c
|
# from quizB_others import sum_of_double_even_place, get_size, get_prefix
def is_valid(number):
valid = (4,5,37,6)
size = get_size(number)
if size > 12 and size < 17:
for i in valid:
if get_prefix(number,i):
tot = sum_of_odd_place(number) + sum_of_double_even_place(number)
if tot%10 == 0:
answer = True
else:
answer = False
else:
answer = False
else:
answer = False
return answer
print is_valid(4388576018402626) #F
print is_valid(4388576018410707) #T
print is_valid(371826291433349) #T
print is_valid(5411045872559122) #T
print is_valid(6011432717792989) #T
def get_digit(number):
total = 0
if number<10:
total = number
else:
number = str(number)
ls = list(number)
for i in range(len(ls)):
temp = int(ls[i])
total += temp
return total
#print get_digit(1123)
def sum_of_odd_place(number):
snumber = str(number)
snumber = snumber[::-1]
odd = snumber[0::2]
lnumber = list(odd)
total = 0
for i in range(len(odd)):
total += int(odd[i])
return total
# print sum_of_odd_place (1234) # 4+2 = 6
# print sum_of_odd_place(4388576018402626)#38
def prefix_matched(number, d):
strd = str(d)
ld = list(strd)
length = len(ld)
a = get_prefix(number, length)
if d == a:
ans = True
else:
ans = False
return ans
# print prefix_matched(4388576018402626,4)
# print prefix_matched(4388576018402626,5)
# print prefix_matched(4388576018402626,43)
|
[
"# from quizB_others import sum_of_double_even_place, get_size, get_prefix\ndef is_valid(number):\n\tvalid = (4,5,37,6)\n\tsize = get_size(number)\n\tif size > 12 and size < 17:\n\t\tfor i in valid:\n\t\t\tif get_prefix(number,i):\n\t\t\t\ttot = sum_of_odd_place(number) + sum_of_double_even_place(number)\n\t\t\t\tif tot%10 == 0:\n\t\t\t\t\tanswer = True\n\t\t\t\telse:\n\t\t\t\t\tanswer = False\n\t\t\t\t\n\t\t\t\n\t\t\telse:\n\t\t\t\tanswer = False\n\telse: \n\t\tanswer = False\n\t\n\treturn answer\n\t\nprint is_valid(4388576018402626) #F\nprint is_valid(4388576018410707) #T\nprint is_valid(371826291433349) #T\nprint is_valid(5411045872559122) #T\nprint is_valid(6011432717792989) #T\n\n\ndef get_digit(number):\n\ttotal = 0\n\tif number<10:\n\t\ttotal = number\n\telse:\n\t\tnumber = str(number)\n\t\tls = list(number)\n\t\tfor i in range(len(ls)):\n\t\t\ttemp = int(ls[i])\n\t\t\ttotal += temp\n\t\n\treturn total\n\t\n#print get_digit(1123)\n\n\ndef sum_of_odd_place(number):\n\tsnumber = str(number)\n\tsnumber = snumber[::-1]\n\todd = snumber[0::2]\n\tlnumber = list(odd)\n\ttotal = 0\n\tfor i in range(len(odd)):\n\t\ttotal += int(odd[i])\n\n\treturn total\n\t\n# print sum_of_odd_place (1234) # 4+2 = 6\t\n# print sum_of_odd_place(4388576018402626)#38\n\n\n\ndef prefix_matched(number, d):\n\tstrd = str(d)\n\tld = list(strd)\n\tlength = len(ld)\n\ta = get_prefix(number, length)\n\tif d == a:\n\t\tans = True\n\telse:\n\t\tans = False\n\treturn ans\n\t\n# print prefix_matched(4388576018402626,4)\n# print prefix_matched(4388576018402626,5)\n# print prefix_matched(4388576018402626,43)\n\n\n"
] | true |
99,650 |
14cac979841b1cc5ad73c44e9ab66caf2de0fe99
|
# coding:utf-8
'''
created on 2018/8/23
@author:sunyihuan
'''
import numpy
a = numpy.zeros([784])
print(a)
|
[
"# coding:utf-8 \n'''\ncreated on 2018/8/23\n\n@author:sunyihuan\n'''\nimport numpy\n\na = numpy.zeros([784])\nprint(a)\n",
"<docstring token>\nimport numpy\na = numpy.zeros([784])\nprint(a)\n",
"<docstring token>\n<import token>\na = numpy.zeros([784])\nprint(a)\n",
"<docstring token>\n<import token>\n<assignment token>\nprint(a)\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
99,651 |
c349afc9775b7e7d2969b72576c72ebd26d906a1
|
#!/usr/bin/env python3
x = int(input())
print("YNeos"[x<30::2])
|
[
"#!/usr/bin/env python3\nx = int(input())\nprint(\"YNeos\"[x<30::2])\n",
"x = int(input())\nprint('YNeos'[x < 30::2])\n",
"<assignment token>\nprint('YNeos'[x < 30::2])\n",
"<assignment token>\n<code token>\n"
] | false |
99,652 |
00c61f79de06f8026bf1749954cf87014a3af161
|
from django.shortcuts import render, redirect
from Eggplant.models import User, Session, Order
from hashlib import md5
import time
def index(request):
data = {}
if 'session' in request.COOKIES:
if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:
data['authorized'] = True
return render(request, "index.html", data)
def printr(request):
data = {}
if 'session' in request.COOKIES:
if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:
username=Session.objects.filter(cookie=request.COOKIES['session'])[0].uid
data['authorized'] = True
if request.method == 'POST':
if 'list' in request.POST and 'color' in request.POST and 'where' in request.POST and \
'file' in request.FILES:
file = request.FILES['file']
fname = md5((file.name + username).encode()).hexdigest()
open('uploads/' + fname, 'bw+').write(file.read())
o = Order(file=fname, client=username, provider=request.POST['where'])
o.save()
data['ok'] = True
return render(request, "print.html", data)
return redirect('/')
def registration(request):
data = {}
if 'session' in request.COOKIES:
if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:
return redirect('/account/')
if request.method == 'POST':
if 'login' in request.POST and 'password' in request.POST and 'type' in request.POST:
login = request.POST['login']
password = request.POST['password']
type = request.POST['type']
if len(User.objects.filter(login=login)) < 1:
u = User(len(User.objects.all()) - 1, login, password, type, 2)#len(User.objects.filter(type=1)) - 1)
u.save()
r = redirect("/login/")
r.set_cookie(key="login", value=login, max_age=60000)
return r
return render(request, "registration.html", data)
def login(request):
data = {}
if 'login' in request.COOKIES:
data['login'] = request.COOKIES['login']
if 'session' in request.COOKIES:
if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:
return redirect('/account/')
if request.method == 'POST':
if 'login' in request.POST and 'password' in request.POST:
login = request.POST['login']
password = request.POST['password']
if len(User.objects.filter(login=login, password=password)) == 1:
r = redirect("/account/")
m = md5()
m.update((login + password + str(time.time())).encode())
s = m.hexdigest()
n = Session(cookie=s, uid=User.objects.filter(login=login)[0].login)
n.save()
r.set_cookie(key="session", value=s, max_age=60000)
return r
return render(request, "login.html", data)
def account(request):
data = {}
if 'session' in request.COOKIES:
if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:
data['authorized'] = True
login = Session.objects.filter(cookie=request.COOKIES['session'])[0].uid
data['type'] = User.objects.filter(login=login)[0].type
return render(request, "account.html", data)
return redirect('/')
def orders(request):
data = {}
if 'session' in request.COOKIES:
if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:
data['authorized'] = True
user = User.objects.filter(login=Session.objects.filter(cookie=request.COOKIES['session'])[0].uid)[0]
if user.type == 2:
return redirect('/account/')
data['orders'] = [i for i in Order.objects.filter(provider=user.pid)]
return render(request, "orders.html", data)
return redirect('/')
def exit(request):
if 'session' in request.COOKIES:
if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:
d = Session.objects.filter(cookie=request.COOKIES['session'])[0]
d.delete()
return redirect('/')
def calc(request):
w = 'err'
a = 0
b = 0
if request.method == "GET":
if "a" in request.GET and "b" in request.GET and "w" in request.GET:
a = int(request.GET['a'])
b = int(request.GET['b'])
w = request.GET['w']
elif request.method == "POST":
if "a" in request.POST and "b" in request.POST and "w" in request.POST:
a = int(request.POST['a'])
b = int(request.POST['b'])
w = request.POST['w']
if w == '/':
if b == 0:
result = 'inf'
else:
result = a / b
elif w == '*':
result = a * b
elif w == '+':
result = a + b
elif w == '-':
result = a - b
elif w == '%':
if b == 0:
result = 'inf'
else:
result = a % b
elif w == '^' or w == '**':
result = a ** b
elif w == 'xor':
result = a ^ b
else:
result = 'err'
return render(request, "calc.html", {'result': result, 'a': a, "b": b, "w": w})
|
[
"from django.shortcuts import render, redirect\nfrom Eggplant.models import User, Session, Order\nfrom hashlib import md5\nimport time\n\n\ndef index(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n return render(request, \"index.html\", data)\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username=Session.objects.filter(cookie=request.COOKIES['session'])[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if 'list' in request.POST and 'color' in request.POST and 'where' in request.POST and \\\n 'file' in request.FILES:\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request.POST['where'])\n o.save()\n data['ok'] = True\n return render(request, \"print.html\", data)\n return redirect('/')\n\n\ndef registration(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if 'login' in request.POST and 'password' in request.POST and 'type' in request.POST:\n login = request.POST['login']\n password = request.POST['password']\n type = request.POST['type']\n if len(User.objects.filter(login=login)) < 1:\n u = User(len(User.objects.all()) - 1, login, password, type, 2)#len(User.objects.filter(type=1)) - 1)\n u.save()\n r = redirect(\"/login/\")\n r.set_cookie(key=\"login\", value=login, max_age=60000)\n return r\n\n return render(request, \"registration.html\", data)\n\n\ndef login(request):\n data = {}\n if 'login' in request.COOKIES:\n data['login'] = request.COOKIES['login']\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if 'login' in request.POST and 'password' in request.POST:\n login = request.POST['login']\n password = request.POST['password']\n if len(User.objects.filter(login=login, password=password)) == 1:\n r = redirect(\"/account/\")\n m = md5()\n\n m.update((login + password + str(time.time())).encode())\n s = m.hexdigest()\n n = Session(cookie=s, uid=User.objects.filter(login=login)[0].login)\n n.save()\n r.set_cookie(key=\"session\", value=s, max_age=60000)\n return r\n return render(request, \"login.html\", data)\n\n\ndef account(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n login = Session.objects.filter(cookie=request.COOKIES['session'])[0].uid\n data['type'] = User.objects.filter(login=login)[0].type\n return render(request, \"account.html\", data)\n return redirect('/')\n\n\ndef orders(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n user = User.objects.filter(login=Session.objects.filter(cookie=request.COOKIES['session'])[0].uid)[0]\n if user.type == 2:\n return redirect('/account/')\n data['orders'] = [i for i in Order.objects.filter(provider=user.pid)]\n return render(request, \"orders.html\", data)\n return redirect('/')\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == \"GET\":\n if \"a\" in request.GET and \"b\" in request.GET and \"w\" in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == \"POST\":\n if \"a\" in request.POST and \"b\" in request.POST and \"w\" in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, \"calc.html\", {'result': result, 'a': a, \"b\": b, \"w\": w})\n",
"from django.shortcuts import render, redirect\nfrom Eggplant.models import User, Session, Order\nfrom hashlib import md5\nimport time\n\n\ndef index(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n return render(request, 'index.html', data)\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\ndef registration(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if ('login' in request.POST and 'password' in request.POST and \n 'type' in request.POST):\n login = request.POST['login']\n password = request.POST['password']\n type = request.POST['type']\n if len(User.objects.filter(login=login)) < 1:\n u = User(len(User.objects.all()) - 1, login, password, type, 2)\n u.save()\n r = redirect('/login/')\n r.set_cookie(key='login', value=login, max_age=60000)\n return r\n return render(request, 'registration.html', data)\n\n\ndef login(request):\n data = {}\n if 'login' in request.COOKIES:\n data['login'] = request.COOKIES['login']\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if 'login' in request.POST and 'password' in request.POST:\n login = request.POST['login']\n password = request.POST['password']\n if len(User.objects.filter(login=login, password=password)) == 1:\n r = redirect('/account/')\n m = md5()\n m.update((login + password + str(time.time())).encode())\n s = m.hexdigest()\n n = Session(cookie=s, uid=User.objects.filter(login=login)[\n 0].login)\n n.save()\n r.set_cookie(key='session', value=s, max_age=60000)\n return r\n return render(request, 'login.html', data)\n\n\ndef account(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n login = Session.objects.filter(cookie=request.COOKIES['session'])[0\n ].uid\n data['type'] = User.objects.filter(login=login)[0].type\n return render(request, 'account.html', data)\n return redirect('/')\n\n\ndef orders(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n user = User.objects.filter(login=Session.objects.filter(cookie=\n request.COOKIES['session'])[0].uid)[0]\n if user.type == 2:\n return redirect('/account/')\n data['orders'] = [i for i in Order.objects.filter(provider=user\n .pid)]\n return render(request, 'orders.html', data)\n return redirect('/')\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == 'GET':\n if 'a' in request.GET and 'b' in request.GET and 'w' in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == 'POST':\n if 'a' in request.POST and 'b' in request.POST and 'w' in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, 'calc.html', {'result': result, 'a': a, 'b': b,\n 'w': w})\n",
"<import token>\n\n\ndef index(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n return render(request, 'index.html', data)\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\ndef registration(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if ('login' in request.POST and 'password' in request.POST and \n 'type' in request.POST):\n login = request.POST['login']\n password = request.POST['password']\n type = request.POST['type']\n if len(User.objects.filter(login=login)) < 1:\n u = User(len(User.objects.all()) - 1, login, password, type, 2)\n u.save()\n r = redirect('/login/')\n r.set_cookie(key='login', value=login, max_age=60000)\n return r\n return render(request, 'registration.html', data)\n\n\ndef login(request):\n data = {}\n if 'login' in request.COOKIES:\n data['login'] = request.COOKIES['login']\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if 'login' in request.POST and 'password' in request.POST:\n login = request.POST['login']\n password = request.POST['password']\n if len(User.objects.filter(login=login, password=password)) == 1:\n r = redirect('/account/')\n m = md5()\n m.update((login + password + str(time.time())).encode())\n s = m.hexdigest()\n n = Session(cookie=s, uid=User.objects.filter(login=login)[\n 0].login)\n n.save()\n r.set_cookie(key='session', value=s, max_age=60000)\n return r\n return render(request, 'login.html', data)\n\n\ndef account(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n login = Session.objects.filter(cookie=request.COOKIES['session'])[0\n ].uid\n data['type'] = User.objects.filter(login=login)[0].type\n return render(request, 'account.html', data)\n return redirect('/')\n\n\ndef orders(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n user = User.objects.filter(login=Session.objects.filter(cookie=\n request.COOKIES['session'])[0].uid)[0]\n if user.type == 2:\n return redirect('/account/')\n data['orders'] = [i for i in Order.objects.filter(provider=user\n .pid)]\n return render(request, 'orders.html', data)\n return redirect('/')\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == 'GET':\n if 'a' in request.GET and 'b' in request.GET and 'w' in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == 'POST':\n if 'a' in request.POST and 'b' in request.POST and 'w' in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, 'calc.html', {'result': result, 'a': a, 'b': b,\n 'w': w})\n",
"<import token>\n\n\ndef index(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n return render(request, 'index.html', data)\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\ndef registration(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if ('login' in request.POST and 'password' in request.POST and \n 'type' in request.POST):\n login = request.POST['login']\n password = request.POST['password']\n type = request.POST['type']\n if len(User.objects.filter(login=login)) < 1:\n u = User(len(User.objects.all()) - 1, login, password, type, 2)\n u.save()\n r = redirect('/login/')\n r.set_cookie(key='login', value=login, max_age=60000)\n return r\n return render(request, 'registration.html', data)\n\n\ndef login(request):\n data = {}\n if 'login' in request.COOKIES:\n data['login'] = request.COOKIES['login']\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if 'login' in request.POST and 'password' in request.POST:\n login = request.POST['login']\n password = request.POST['password']\n if len(User.objects.filter(login=login, password=password)) == 1:\n r = redirect('/account/')\n m = md5()\n m.update((login + password + str(time.time())).encode())\n s = m.hexdigest()\n n = Session(cookie=s, uid=User.objects.filter(login=login)[\n 0].login)\n n.save()\n r.set_cookie(key='session', value=s, max_age=60000)\n return r\n return render(request, 'login.html', data)\n\n\ndef account(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n login = Session.objects.filter(cookie=request.COOKIES['session'])[0\n ].uid\n data['type'] = User.objects.filter(login=login)[0].type\n return render(request, 'account.html', data)\n return redirect('/')\n\n\n<function token>\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == 'GET':\n if 'a' in request.GET and 'b' in request.GET and 'w' in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == 'POST':\n if 'a' in request.POST and 'b' in request.POST and 'w' in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, 'calc.html', {'result': result, 'a': a, 'b': b,\n 'w': w})\n",
"<import token>\n\n\ndef index(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n return render(request, 'index.html', data)\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\ndef registration(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n return redirect('/account/')\n if request.method == 'POST':\n if ('login' in request.POST and 'password' in request.POST and \n 'type' in request.POST):\n login = request.POST['login']\n password = request.POST['password']\n type = request.POST['type']\n if len(User.objects.filter(login=login)) < 1:\n u = User(len(User.objects.all()) - 1, login, password, type, 2)\n u.save()\n r = redirect('/login/')\n r.set_cookie(key='login', value=login, max_age=60000)\n return r\n return render(request, 'registration.html', data)\n\n\n<function token>\n\n\ndef account(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n login = Session.objects.filter(cookie=request.COOKIES['session'])[0\n ].uid\n data['type'] = User.objects.filter(login=login)[0].type\n return render(request, 'account.html', data)\n return redirect('/')\n\n\n<function token>\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == 'GET':\n if 'a' in request.GET and 'b' in request.GET and 'w' in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == 'POST':\n if 'a' in request.POST and 'b' in request.POST and 'w' in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, 'calc.html', {'result': result, 'a': a, 'b': b,\n 'w': w})\n",
"<import token>\n\n\ndef index(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n return render(request, 'index.html', data)\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\n<function token>\n<function token>\n\n\ndef account(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n login = Session.objects.filter(cookie=request.COOKIES['session'])[0\n ].uid\n data['type'] = User.objects.filter(login=login)[0].type\n return render(request, 'account.html', data)\n return redirect('/')\n\n\n<function token>\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == 'GET':\n if 'a' in request.GET and 'b' in request.GET and 'w' in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == 'POST':\n if 'a' in request.POST and 'b' in request.POST and 'w' in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, 'calc.html', {'result': result, 'a': a, 'b': b,\n 'w': w})\n",
"<import token>\n\n\ndef index(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n data['authorized'] = True\n return render(request, 'index.html', data)\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == 'GET':\n if 'a' in request.GET and 'b' in request.GET and 'w' in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == 'POST':\n if 'a' in request.POST and 'b' in request.POST and 'w' in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, 'calc.html', {'result': result, 'a': a, 'b': b,\n 'w': w})\n",
"<import token>\n<function token>\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\ndef calc(request):\n w = 'err'\n a = 0\n b = 0\n if request.method == 'GET':\n if 'a' in request.GET and 'b' in request.GET and 'w' in request.GET:\n a = int(request.GET['a'])\n b = int(request.GET['b'])\n w = request.GET['w']\n elif request.method == 'POST':\n if 'a' in request.POST and 'b' in request.POST and 'w' in request.POST:\n a = int(request.POST['a'])\n b = int(request.POST['b'])\n w = request.POST['w']\n if w == '/':\n if b == 0:\n result = 'inf'\n else:\n result = a / b\n elif w == '*':\n result = a * b\n elif w == '+':\n result = a + b\n elif w == '-':\n result = a - b\n elif w == '%':\n if b == 0:\n result = 'inf'\n else:\n result = a % b\n elif w == '^' or w == '**':\n result = a ** b\n elif w == 'xor':\n result = a ^ b\n else:\n result = 'err'\n return render(request, 'calc.html', {'result': result, 'a': a, 'b': b,\n 'w': w})\n",
"<import token>\n<function token>\n\n\ndef printr(request):\n data = {}\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n username = Session.objects.filter(cookie=request.COOKIES['session']\n )[0].uid\n data['authorized'] = True\n if request.method == 'POST':\n if ('list' in request.POST and 'color' in request.POST and \n 'where' in request.POST and 'file' in request.FILES):\n file = request.FILES['file']\n fname = md5((file.name + username).encode()).hexdigest()\n open('uploads/' + fname, 'bw+').write(file.read())\n o = Order(file=fname, client=username, provider=request\n .POST['where'])\n o.save()\n data['ok'] = True\n return render(request, 'print.html', data)\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef exit(request):\n if 'session' in request.COOKIES:\n if len(Session.objects.filter(cookie=request.COOKIES['session'])) == 1:\n d = Session.objects.filter(cookie=request.COOKIES['session'])[0]\n d.delete()\n return redirect('/')\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,653 |
bf09c051490f20793f6519b4bd6420b9b4ef21f1
|
#%%
# https://icc-aria.ir/courses/%D8%B1%D8%A7%D8%A8%D8%B7-%DA%AF%D8%B1%D8%A7%D9%81%DB%8C%DA%A9%DB%8C-tkinter-%D9%BE%D8%A7%DB%8C%D8%AA%D9%88%D9%86/episode/messagebox
from tkinter import *
from tkinter import messagebox
def show_info():
messagebox.showinfo('author', 'written by alibigdeli')
def show_error():
messagebox.showerror('ERROR', 'somthing happend')
def show_warning():
messagebox.showwarning('warning', 'your data will be loss')
def show_ask():
messagebox.askquestion('q/a', 'Wanna exit?')
# messagebox.askokcancel('q/a', 'Wanna exit?')
# messagebox.askyesno('q/a', 'Wanna exit?')
# messagebox.askretrycancel('q/a', 'Wanna exit?')
pass
window = Tk()
window.geometry('300x300')
menubar = Menu(window)
menubar.add_command(label='info', command=show_info)
menubar.add_command(label='exit', command=show_ask)
window.config(menu=menubar)
Button(window, text='warning', command=show_warning).pack()
Button(window, text='error', command=show_error).pack()
window.mainloop()
|
[
"#%%\n# https://icc-aria.ir/courses/%D8%B1%D8%A7%D8%A8%D8%B7-%DA%AF%D8%B1%D8%A7%D9%81%DB%8C%DA%A9%DB%8C-tkinter-%D9%BE%D8%A7%DB%8C%D8%AA%D9%88%D9%86/episode/messagebox\nfrom tkinter import *\nfrom tkinter import messagebox\n\ndef show_info():\n messagebox.showinfo('author', 'written by alibigdeli')\ndef show_error():\n messagebox.showerror('ERROR', 'somthing happend')\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\ndef show_ask():\n messagebox.askquestion('q/a', 'Wanna exit?')\n # messagebox.askokcancel('q/a', 'Wanna exit?')\n # messagebox.askyesno('q/a', 'Wanna exit?')\n # messagebox.askretrycancel('q/a', 'Wanna exit?')\n pass\n\nwindow = Tk()\nwindow.geometry('300x300')\n\nmenubar = Menu(window)\nmenubar.add_command(label='info', command=show_info)\nmenubar.add_command(label='exit', command=show_ask)\n\nwindow.config(menu=menubar)\n\nButton(window, text='warning', command=show_warning).pack()\nButton(window, text='error', command=show_error).pack()\nwindow.mainloop()\n ",
"from tkinter import *\nfrom tkinter import messagebox\n\n\ndef show_info():\n messagebox.showinfo('author', 'written by alibigdeli')\n\n\ndef show_error():\n messagebox.showerror('ERROR', 'somthing happend')\n\n\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\n\n\ndef show_ask():\n messagebox.askquestion('q/a', 'Wanna exit?')\n pass\n\n\nwindow = Tk()\nwindow.geometry('300x300')\nmenubar = Menu(window)\nmenubar.add_command(label='info', command=show_info)\nmenubar.add_command(label='exit', command=show_ask)\nwindow.config(menu=menubar)\nButton(window, text='warning', command=show_warning).pack()\nButton(window, text='error', command=show_error).pack()\nwindow.mainloop()\n",
"<import token>\n\n\ndef show_info():\n messagebox.showinfo('author', 'written by alibigdeli')\n\n\ndef show_error():\n messagebox.showerror('ERROR', 'somthing happend')\n\n\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\n\n\ndef show_ask():\n messagebox.askquestion('q/a', 'Wanna exit?')\n pass\n\n\nwindow = Tk()\nwindow.geometry('300x300')\nmenubar = Menu(window)\nmenubar.add_command(label='info', command=show_info)\nmenubar.add_command(label='exit', command=show_ask)\nwindow.config(menu=menubar)\nButton(window, text='warning', command=show_warning).pack()\nButton(window, text='error', command=show_error).pack()\nwindow.mainloop()\n",
"<import token>\n\n\ndef show_info():\n messagebox.showinfo('author', 'written by alibigdeli')\n\n\ndef show_error():\n messagebox.showerror('ERROR', 'somthing happend')\n\n\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\n\n\ndef show_ask():\n messagebox.askquestion('q/a', 'Wanna exit?')\n pass\n\n\n<assignment token>\nwindow.geometry('300x300')\n<assignment token>\nmenubar.add_command(label='info', command=show_info)\nmenubar.add_command(label='exit', command=show_ask)\nwindow.config(menu=menubar)\nButton(window, text='warning', command=show_warning).pack()\nButton(window, text='error', command=show_error).pack()\nwindow.mainloop()\n",
"<import token>\n\n\ndef show_info():\n messagebox.showinfo('author', 'written by alibigdeli')\n\n\ndef show_error():\n messagebox.showerror('ERROR', 'somthing happend')\n\n\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\n\n\ndef show_ask():\n messagebox.askquestion('q/a', 'Wanna exit?')\n pass\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef show_info():\n messagebox.showinfo('author', 'written by alibigdeli')\n\n\ndef show_error():\n messagebox.showerror('ERROR', 'somthing happend')\n\n\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef show_info():\n messagebox.showinfo('author', 'written by alibigdeli')\n\n\n<function token>\n\n\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef show_warning():\n messagebox.showwarning('warning', 'your data will be loss')\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,654 |
01659e873161eacb29b1c06104804fe99a7449a5
|
from strat_gaz.storage_optimisation.stockage import *
from pathlib import Path
import pandas as pd
import numpy as np
path_spot = Path(__file__).parent.parent / 'Data' / 'spot_history_HH.csv'
# volume initial bien borné
def test_vinit():
try :
data = pd.read_csv(path_spot)
X_0 = np.zeros( len(data['Day']))
data['Day'] = pd.to_datetime(data['Day'], format = '%Y-%m-%d')
stock = Stockage(100,-10 , data, X_0)
except ValueError:
assert True
else:
assert False
# lim_inf not defined
def test_lim_inf():
try :
data = pd.read_csv(path_spot)
X_0 = np.zeros( len(data['Day']))
data['Day'] = pd.to_datetime(data['Day'], format = '%Y-%m-%d')
stock = Stockage(100,10 , data, X_0, comp_tunnel = False)
stock.lim_inf
except AttributeError :
assert True
#objet pour tests
data = pd.read_csv(path_spot)
X_0 = np.zeros( len(data['Day']))
data['Day'] = pd.to_datetime(data['Day'], format = '%Y-%m-%d')
stock = Stockage(100,10 , data, X_0)
#test sout
def test_soutirage_empty():
if stock.sout_correction(0) == 0 :
assert True
def test_soutirage_full():
if stock.sout_correction(1) == 1 :
assert True
def test_inj_empty():
if stock.sout_correction(0) == 1 :
assert True
def test_inj_full():
if stock.sout_correction(1) == 0 :
assert True
|
[
"from strat_gaz.storage_optimisation.stockage import *\nfrom pathlib import Path\nimport pandas as pd \nimport numpy as np\npath_spot = Path(__file__).parent.parent / 'Data' / 'spot_history_HH.csv'\n# volume initial bien borné\n\ndef test_vinit():\n try :\n data = pd.read_csv(path_spot)\n X_0 = np.zeros( len(data['Day'])) \n data['Day'] = pd.to_datetime(data['Day'], format = '%Y-%m-%d') \n stock = Stockage(100,-10 , data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n# lim_inf not defined\n\ndef test_lim_inf():\n try :\n data = pd.read_csv(path_spot)\n X_0 = np.zeros( len(data['Day'])) \n data['Day'] = pd.to_datetime(data['Day'], format = '%Y-%m-%d') \n stock = Stockage(100,10 , data, X_0, comp_tunnel = False)\n stock.lim_inf\n except AttributeError :\n assert True \n\n#objet pour tests\ndata = pd.read_csv(path_spot)\nX_0 = np.zeros( len(data['Day'])) \ndata['Day'] = pd.to_datetime(data['Day'], format = '%Y-%m-%d') \nstock = Stockage(100,10 , data, X_0)\n\n#test sout \ndef test_soutirage_empty():\n\n if stock.sout_correction(0) == 0 :\n assert True\n\ndef test_soutirage_full():\n\n if stock.sout_correction(1) == 1 :\n assert True\n \ndef test_inj_empty():\n\n if stock.sout_correction(0) == 1 :\n assert True\n\ndef test_inj_full():\n\n if stock.sout_correction(1) == 0 :\n assert True\n\n\n",
"from strat_gaz.storage_optimisation.stockage import *\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\npath_spot = Path(__file__).parent.parent / 'Data' / 'spot_history_HH.csv'\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\ndef test_lim_inf():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, 10, data, X_0, comp_tunnel=False)\n stock.lim_inf\n except AttributeError:\n assert True\n\n\ndata = pd.read_csv(path_spot)\nX_0 = np.zeros(len(data['Day']))\ndata['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\nstock = Stockage(100, 10, data, X_0)\n\n\ndef test_soutirage_empty():\n if stock.sout_correction(0) == 0:\n assert True\n\n\ndef test_soutirage_full():\n if stock.sout_correction(1) == 1:\n assert True\n\n\ndef test_inj_empty():\n if stock.sout_correction(0) == 1:\n assert True\n\n\ndef test_inj_full():\n if stock.sout_correction(1) == 0:\n assert True\n",
"<import token>\npath_spot = Path(__file__).parent.parent / 'Data' / 'spot_history_HH.csv'\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\ndef test_lim_inf():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, 10, data, X_0, comp_tunnel=False)\n stock.lim_inf\n except AttributeError:\n assert True\n\n\ndata = pd.read_csv(path_spot)\nX_0 = np.zeros(len(data['Day']))\ndata['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\nstock = Stockage(100, 10, data, X_0)\n\n\ndef test_soutirage_empty():\n if stock.sout_correction(0) == 0:\n assert True\n\n\ndef test_soutirage_full():\n if stock.sout_correction(1) == 1:\n assert True\n\n\ndef test_inj_empty():\n if stock.sout_correction(0) == 1:\n assert True\n\n\ndef test_inj_full():\n if stock.sout_correction(1) == 0:\n assert True\n",
"<import token>\n<assignment token>\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\ndef test_lim_inf():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, 10, data, X_0, comp_tunnel=False)\n stock.lim_inf\n except AttributeError:\n assert True\n\n\n<assignment token>\n\n\ndef test_soutirage_empty():\n if stock.sout_correction(0) == 0:\n assert True\n\n\ndef test_soutirage_full():\n if stock.sout_correction(1) == 1:\n assert True\n\n\ndef test_inj_empty():\n if stock.sout_correction(0) == 1:\n assert True\n\n\ndef test_inj_full():\n if stock.sout_correction(1) == 0:\n assert True\n",
"<import token>\n<assignment token>\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\n<function token>\n<assignment token>\n\n\ndef test_soutirage_empty():\n if stock.sout_correction(0) == 0:\n assert True\n\n\ndef test_soutirage_full():\n if stock.sout_correction(1) == 1:\n assert True\n\n\ndef test_inj_empty():\n if stock.sout_correction(0) == 1:\n assert True\n\n\ndef test_inj_full():\n if stock.sout_correction(1) == 0:\n assert True\n",
"<import token>\n<assignment token>\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\n<function token>\n<assignment token>\n\n\ndef test_soutirage_empty():\n if stock.sout_correction(0) == 0:\n assert True\n\n\ndef test_soutirage_full():\n if stock.sout_correction(1) == 1:\n assert True\n\n\ndef test_inj_empty():\n if stock.sout_correction(0) == 1:\n assert True\n\n\n<function token>\n",
"<import token>\n<assignment token>\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\n<function token>\n<assignment token>\n\n\ndef test_soutirage_empty():\n if stock.sout_correction(0) == 0:\n assert True\n\n\n<function token>\n\n\ndef test_inj_empty():\n if stock.sout_correction(0) == 1:\n assert True\n\n\n<function token>\n",
"<import token>\n<assignment token>\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\n<function token>\n<assignment token>\n\n\ndef test_soutirage_empty():\n if stock.sout_correction(0) == 0:\n assert True\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n\n\ndef test_vinit():\n try:\n data = pd.read_csv(path_spot)\n X_0 = np.zeros(len(data['Day']))\n data['Day'] = pd.to_datetime(data['Day'], format='%Y-%m-%d')\n stock = Stockage(100, -10, data, X_0)\n except ValueError:\n assert True\n else:\n assert False\n\n\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,655 |
1c9f2dad44ac7c244ea3a66ae3bda09faaa65940
|
from tkinter import *
import time
import os
import ctypes
from sound import Sound
from pocketsphinx import LiveSpeech, get_model_path
from threading import Thread
global start
global f
#function for decreasing volume where input is speed
def decrease(inputspeed):
#while current volume is not desired volume
#decrease volume at rate according to speed
speed = int(inputspeed)
count = 0
if speed == 0:
Sound.volume_set(10)
elif speed == 1:
while count <= 15:
#1 second reduction for 15 bars
Sound.volume_down()
time.sleep(1 / 15)
count += 1
elif speed == 3:
while count <= 15:
#3 second reduction for 15 bars
Sound.volume_down()
time.sleep(3 / 15)
count += 1
elif speed == 5:
while count <= 15:
#5 second reduction for 15 bars
Sound.volume_down()
time.sleep(5 / 15)
count += 1
def audioCheck(inputspeed):
if (inputspeed == "0" or inputspeed == "1" or inputspeed == "3" or inputspeed == "5"):
model_path = get_model_path()
speech = LiveSpeech(
verbose=False,
sampling_rate=16000,
buffer_size=2048,
no_search=False,
full_utt=False,
hmm=os.path.join(model_path, 'en-us'),
lm=os.path.join(model_path, 'en-us.lm.bin'),
dic=os.path.join(model_path, 'cmudict-en-us.dict'))
for phrase in speech:
print(phrase)
break
decrease(inputspeed)
#reset volume
def normalize():
Sound.volume_set(40)
class Application(Frame):
def __init__(self, master=None):
super().__init__(master)
self.pack()
self.create_widgets()
normalize()
#audioCheck(3)
def create_widgets(self):
#seperator
self.line1 = Label(self, text="--------------EXAMINOR---------------")
self.line1.pack()
#User File Name
self.lbl = Label(self, text="Enter File Name:")
self.lbl.pack()
self.file = Entry(self)
self.file.pack()
self.file.insert(0, "test1a.txt") #default file
self.openFile = Button(self, text="OK", command=self.fileStart)
self.openFile.pack()
#Set Speeed & Start Recording
# self.recTitle = Label(self, text="Enter a speed: 0, 1, 3, 5, none")
# self.recTitle.pack()
# self.recSpeed = Entry(self)
# self.recSpeed.pack()
# self.recSpeed.insert(0, "1")
# self.startRecording = Button(self, text="Start Recording")
# self.startRecording["command"]=lambda : self.audioCheck(self.recSpeed.get())
# self.startRecording.pack()
#input is the speed
#on submit: run recording
self.line = Label(self, text="Enter a speed: 0, 1, 3, 5, none")
self.line.pack()
#Start button initiates the experiment
self.speed = Entry(self)
self.speed.pack()
self.speed.insert(0,"0") #decrease speed default
self.start = Button(self, text="START", command=self.start)
self.start.pack(padx=5, pady= 20)
#user section
self.userline = Label(self, text="----------------USER----------------")
self.userline.pack()
#Entry field for word counts
# self.ent = Entry(self)
# self.ent.pack()
# self.submit = Button(self, text="SUBMIT", bg="yellow", command=self.enter)
# self.submit.pack()
#Colour buttons
self.blue = Button(self, text="BLUE", fg="blue", bg="blue", height=5, width=10)
self.blue["command"]=lambda colour="blue": self.colour(colour)
self.blue.pack(padx=10, pady= 20, side="left")
self.red = Button(self, text="RED", fg="red", bg="red", height=5, width=10)
self.red["command"]=lambda colour="red": self.colour(colour)
self.red.pack(padx=10, pady= 20, side="left")
self.green = Button(self, text="GREEN", fg="green", bg="green", height=5, width=10)
self.green["command"]=lambda colour="green": self.colour(colour)
self.green.pack(padx=10, pady= 20, side="left")
self.none = Button(self, text="UNSURE", fg="white", bg="black", height=5, width=10)
self.none["command"]=lambda colour="unsure": self.colour(colour)
self.none.pack(padx=10, pady= 20, side="left")
#quit
self.quit = Button(self, text="QUIT", fg="red", bg="black",command=root.destroy)
self.quit.pack(padx=20, pady= 20)
#Setup for experiment start
def start(self):
normalize()
#set Start time
global start
start = time.time()
self.line1.after(60000, self.increment)
#audio
option = self.speed.get()
global f
f.write('AUDIO REDUCTION LEVEL: ' + option + '\n')
if (option == "none"):
option = "0"
else:
audioCheck(option)
f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) + '\n')
def increment(self):
global f
f.close()
ctypes.windll.user32.MessageBoxW(0, "DONE!", "Test over", 1)
#Record time button is clicked
def colour(self, colour):
global start
t = time.time() - start
global f
f.write('COLOUR: ' + colour + '\n')
f.write('REACTION:' + str(t) + '\n')
normalize()
#Record user counts
def enter(self):
global start
global f
t = time.time() - start
#Stop the experiment at 1.5min mark
if (t > 90):
f.close()
ctypes.windll.user32.MessageBoxW(0, "DONE!", "Test over", 1)
else:
f.write(self.ent.get() + '\n')
#clear the field for next entry
self.ent.delete(0,END)
#Open the user data file for writing
def fileStart(self):
global f
f = open(self.file.get(),'w')
#setup the GUI
root = Tk()
app = Application(master=root)
root.geometry("400x400")
app.mainloop()
|
[
"from tkinter import *\nimport time\nimport os\nimport ctypes\nfrom sound import Sound\nfrom pocketsphinx import LiveSpeech, get_model_path\nfrom threading import Thread\n\nglobal start\nglobal f\n\n#function for decreasing volume where input is speed\ndef decrease(inputspeed):\n\t#while current volume is not desired volume\n\t#decrease volume at rate according to speed\n\tspeed = int(inputspeed)\n\tcount = 0\n\tif speed == 0:\n\t\tSound.volume_set(10)\n\telif speed == 1:\n\t\twhile count <= 15:\n\t\t\t#1 second reduction for 15 bars\n\t\t\tSound.volume_down()\n\t\t\ttime.sleep(1 / 15)\n\t\t\tcount += 1\n\telif speed == 3:\n\t\twhile count <= 15:\n\t\t\t#3 second reduction for 15 bars\n\t\t\tSound.volume_down()\n\t\t\ttime.sleep(3 / 15)\n\t\t\tcount += 1\n\telif speed == 5:\n\t\twhile count <= 15:\n\t\t\t#5 second reduction for 15 bars\n\t\t\tSound.volume_down()\n\t\t\ttime.sleep(5 / 15)\n\t\t\tcount += 1\n\ndef audioCheck(inputspeed):\n\tif (inputspeed == \"0\" or inputspeed == \"1\" or inputspeed == \"3\" or inputspeed == \"5\"):\n\t\tmodel_path = get_model_path()\n\t\tspeech = LiveSpeech(\n verbose=False,\n sampling_rate=16000,\n buffer_size=2048,\n no_search=False,\n full_utt=False,\n hmm=os.path.join(model_path, 'en-us'),\n lm=os.path.join(model_path, 'en-us.lm.bin'),\n dic=os.path.join(model_path, 'cmudict-en-us.dict'))\n\t\tfor phrase in speech:\n\t\t print(phrase)\n\t\t break\n\t\tdecrease(inputspeed)\n\n#reset volume \ndef normalize():\n\tSound.volume_set(40)\n\n\n\nclass Application(Frame):\n \n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n #audioCheck(3)\n\n def create_widgets(self):\n #seperator\n self.line1 = Label(self, text=\"--------------EXAMINOR---------------\")\n self.line1.pack()\n\n #User File Name\n self.lbl = Label(self, text=\"Enter File Name:\")\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, \"test1a.txt\") #default file\n self.openFile = Button(self, text=\"OK\", command=self.fileStart)\n self.openFile.pack()\n\n #Set Speeed & Start Recording\n # self.recTitle = Label(self, text=\"Enter a speed: 0, 1, 3, 5, none\")\n # self.recTitle.pack() \n # self.recSpeed = Entry(self)\n # self.recSpeed.pack()\n # self.recSpeed.insert(0, \"1\")\n # self.startRecording = Button(self, text=\"Start Recording\")\n # self.startRecording[\"command\"]=lambda : self.audioCheck(self.recSpeed.get())\n # self.startRecording.pack()\n\n #input is the speed\n #on submit: run recording\n\n\n \n self.line = Label(self, text=\"Enter a speed: 0, 1, 3, 5, none\")\n self.line.pack()\n\n #Start button initiates the experiment\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0,\"0\") #decrease speed default\n \n self.start = Button(self, text=\"START\", command=self.start)\n self.start.pack(padx=5, pady= 20)\n\n #user section\n self.userline = Label(self, text=\"----------------USER----------------\")\n self.userline.pack()\n\n #Entry field for word counts\n # self.ent = Entry(self)\n # self.ent.pack()\n # self.submit = Button(self, text=\"SUBMIT\", bg=\"yellow\", command=self.enter)\n # self.submit.pack()\n\n #Colour buttons\n self.blue = Button(self, text=\"BLUE\", fg=\"blue\", bg=\"blue\", height=5, width=10)\n self.blue[\"command\"]=lambda colour=\"blue\": self.colour(colour)\n self.blue.pack(padx=10, pady= 20, side=\"left\")\n self.red = Button(self, text=\"RED\", fg=\"red\", bg=\"red\", height=5, width=10)\n self.red[\"command\"]=lambda colour=\"red\": self.colour(colour)\n self.red.pack(padx=10, pady= 20, side=\"left\")\n self.green = Button(self, text=\"GREEN\", fg=\"green\", bg=\"green\", height=5, width=10)\n self.green[\"command\"]=lambda colour=\"green\": self.colour(colour)\n self.green.pack(padx=10, pady= 20, side=\"left\")\n self.none = Button(self, text=\"UNSURE\", fg=\"white\", bg=\"black\", height=5, width=10)\n self.none[\"command\"]=lambda colour=\"unsure\": self.colour(colour)\n self.none.pack(padx=10, pady= 20, side=\"left\")\n\n #quit\n self.quit = Button(self, text=\"QUIT\", fg=\"red\", bg=\"black\",command=root.destroy)\n self.quit.pack(padx=20, pady= 20)\n\n #Setup for experiment start\n def start(self):\n normalize()\n #set Start time\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n\n\n #audio\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if (option == \"none\"):\n \toption = \"0\"\n else:\n \taudioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) + '\\n')\n \n def increment(self):\n\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, \"DONE!\", \"Test over\", 1)\n \n\n #Record time button is clicked\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n #Record user counts\n def enter(self):\n global start\n global f\n t = time.time() - start\n\n #Stop the experiment at 1.5min mark\n if (t > 90):\n f.close()\n ctypes.windll.user32.MessageBoxW(0, \"DONE!\", \"Test over\", 1)\n else:\n f.write(self.ent.get() + '\\n')\n #clear the field for next entry\n self.ent.delete(0,END)\n\n #Open the user data file for writing\n def fileStart(self):\n global f\n f = open(self.file.get(),'w')\n\n#setup the GUI\n\nroot = Tk()\napp = Application(master=root)\nroot.geometry(\"400x400\")\napp.mainloop()\n",
"from tkinter import *\nimport time\nimport os\nimport ctypes\nfrom sound import Sound\nfrom pocketsphinx import LiveSpeech, get_model_path\nfrom threading import Thread\nglobal start\nglobal f\n\n\ndef decrease(inputspeed):\n speed = int(inputspeed)\n count = 0\n if speed == 0:\n Sound.volume_set(10)\n elif speed == 1:\n while count <= 15:\n Sound.volume_down()\n time.sleep(1 / 15)\n count += 1\n elif speed == 3:\n while count <= 15:\n Sound.volume_down()\n time.sleep(3 / 15)\n count += 1\n elif speed == 5:\n while count <= 15:\n Sound.volume_down()\n time.sleep(5 / 15)\n count += 1\n\n\ndef audioCheck(inputspeed):\n if (inputspeed == '0' or inputspeed == '1' or inputspeed == '3' or \n inputspeed == '5'):\n model_path = get_model_path()\n speech = LiveSpeech(verbose=False, sampling_rate=16000, buffer_size\n =2048, no_search=False, full_utt=False, hmm=os.path.join(\n model_path, 'en-us'), lm=os.path.join(model_path,\n 'en-us.lm.bin'), dic=os.path.join(model_path, 'cmudict-en-us.dict')\n )\n for phrase in speech:\n print(phrase)\n break\n decrease(inputspeed)\n\n\ndef normalize():\n Sound.volume_set(40)\n\n\nclass Application(Frame):\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\nroot = Tk()\napp = Application(master=root)\nroot.geometry('400x400')\napp.mainloop()\n",
"<import token>\nglobal start\nglobal f\n\n\ndef decrease(inputspeed):\n speed = int(inputspeed)\n count = 0\n if speed == 0:\n Sound.volume_set(10)\n elif speed == 1:\n while count <= 15:\n Sound.volume_down()\n time.sleep(1 / 15)\n count += 1\n elif speed == 3:\n while count <= 15:\n Sound.volume_down()\n time.sleep(3 / 15)\n count += 1\n elif speed == 5:\n while count <= 15:\n Sound.volume_down()\n time.sleep(5 / 15)\n count += 1\n\n\ndef audioCheck(inputspeed):\n if (inputspeed == '0' or inputspeed == '1' or inputspeed == '3' or \n inputspeed == '5'):\n model_path = get_model_path()\n speech = LiveSpeech(verbose=False, sampling_rate=16000, buffer_size\n =2048, no_search=False, full_utt=False, hmm=os.path.join(\n model_path, 'en-us'), lm=os.path.join(model_path,\n 'en-us.lm.bin'), dic=os.path.join(model_path, 'cmudict-en-us.dict')\n )\n for phrase in speech:\n print(phrase)\n break\n decrease(inputspeed)\n\n\ndef normalize():\n Sound.volume_set(40)\n\n\nclass Application(Frame):\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\nroot = Tk()\napp = Application(master=root)\nroot.geometry('400x400')\napp.mainloop()\n",
"<import token>\nglobal start\nglobal f\n\n\ndef decrease(inputspeed):\n speed = int(inputspeed)\n count = 0\n if speed == 0:\n Sound.volume_set(10)\n elif speed == 1:\n while count <= 15:\n Sound.volume_down()\n time.sleep(1 / 15)\n count += 1\n elif speed == 3:\n while count <= 15:\n Sound.volume_down()\n time.sleep(3 / 15)\n count += 1\n elif speed == 5:\n while count <= 15:\n Sound.volume_down()\n time.sleep(5 / 15)\n count += 1\n\n\ndef audioCheck(inputspeed):\n if (inputspeed == '0' or inputspeed == '1' or inputspeed == '3' or \n inputspeed == '5'):\n model_path = get_model_path()\n speech = LiveSpeech(verbose=False, sampling_rate=16000, buffer_size\n =2048, no_search=False, full_utt=False, hmm=os.path.join(\n model_path, 'en-us'), lm=os.path.join(model_path,\n 'en-us.lm.bin'), dic=os.path.join(model_path, 'cmudict-en-us.dict')\n )\n for phrase in speech:\n print(phrase)\n break\n decrease(inputspeed)\n\n\ndef normalize():\n Sound.volume_set(40)\n\n\nclass Application(Frame):\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\n<assignment token>\nroot.geometry('400x400')\napp.mainloop()\n",
"<import token>\n<code token>\n\n\ndef decrease(inputspeed):\n speed = int(inputspeed)\n count = 0\n if speed == 0:\n Sound.volume_set(10)\n elif speed == 1:\n while count <= 15:\n Sound.volume_down()\n time.sleep(1 / 15)\n count += 1\n elif speed == 3:\n while count <= 15:\n Sound.volume_down()\n time.sleep(3 / 15)\n count += 1\n elif speed == 5:\n while count <= 15:\n Sound.volume_down()\n time.sleep(5 / 15)\n count += 1\n\n\ndef audioCheck(inputspeed):\n if (inputspeed == '0' or inputspeed == '1' or inputspeed == '3' or \n inputspeed == '5'):\n model_path = get_model_path()\n speech = LiveSpeech(verbose=False, sampling_rate=16000, buffer_size\n =2048, no_search=False, full_utt=False, hmm=os.path.join(\n model_path, 'en-us'), lm=os.path.join(model_path,\n 'en-us.lm.bin'), dic=os.path.join(model_path, 'cmudict-en-us.dict')\n )\n for phrase in speech:\n print(phrase)\n break\n decrease(inputspeed)\n\n\ndef normalize():\n Sound.volume_set(40)\n\n\nclass Application(Frame):\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef decrease(inputspeed):\n speed = int(inputspeed)\n count = 0\n if speed == 0:\n Sound.volume_set(10)\n elif speed == 1:\n while count <= 15:\n Sound.volume_down()\n time.sleep(1 / 15)\n count += 1\n elif speed == 3:\n while count <= 15:\n Sound.volume_down()\n time.sleep(3 / 15)\n count += 1\n elif speed == 5:\n while count <= 15:\n Sound.volume_down()\n time.sleep(5 / 15)\n count += 1\n\n\n<function token>\n\n\ndef normalize():\n Sound.volume_set(40)\n\n\nclass Application(Frame):\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef decrease(inputspeed):\n speed = int(inputspeed)\n count = 0\n if speed == 0:\n Sound.volume_set(10)\n elif speed == 1:\n while count <= 15:\n Sound.volume_down()\n time.sleep(1 / 15)\n count += 1\n elif speed == 3:\n while count <= 15:\n Sound.volume_down()\n time.sleep(3 / 15)\n count += 1\n elif speed == 5:\n while count <= 15:\n Sound.volume_down()\n time.sleep(5 / 15)\n count += 1\n\n\n<function token>\n<function token>\n\n\nclass Application(Frame):\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n normalize()\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n <function token>\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n\n def fileStart(self):\n global f\n f = open(self.file.get(), 'w')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n <function token>\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n\n def enter(self):\n global start\n global f\n t = time.time() - start\n if t > 90:\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n else:\n f.write(self.ent.get() + '\\n')\n self.ent.delete(0, END)\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n <function token>\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n\n def start(self):\n normalize()\n global start\n start = time.time()\n self.line1.after(60000, self.increment)\n option = self.speed.get()\n global f\n f.write('AUDIO REDUCTION LEVEL: ' + option + '\\n')\n if option == 'none':\n option = '0'\n else:\n audioCheck(option)\n f.write('SPOKEN CUE: ' + str(time.time() - start - float(option)) +\n '\\n')\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n <function token>\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n <function token>\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n\n def colour(self, colour):\n global start\n t = time.time() - start\n global f\n f.write('COLOUR: ' + colour + '\\n')\n f.write('REACTION:' + str(t) + '\\n')\n normalize()\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n <function token>\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n <function token>\n\n def increment(self):\n global f\n f.close()\n ctypes.windll.user32.MessageBoxW(0, 'DONE!', 'Test over', 1)\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n <function token>\n\n def create_widgets(self):\n self.line1 = Label(self, text='--------------EXAMINOR---------------')\n self.line1.pack()\n self.lbl = Label(self, text='Enter File Name:')\n self.lbl.pack()\n self.file = Entry(self)\n self.file.pack()\n self.file.insert(0, 'test1a.txt')\n self.openFile = Button(self, text='OK', command=self.fileStart)\n self.openFile.pack()\n self.line = Label(self, text='Enter a speed: 0, 1, 3, 5, none')\n self.line.pack()\n self.speed = Entry(self)\n self.speed.pack()\n self.speed.insert(0, '0')\n self.start = Button(self, text='START', command=self.start)\n self.start.pack(padx=5, pady=20)\n self.userline = Label(self, text='----------------USER----------------'\n )\n self.userline.pack()\n self.blue = Button(self, text='BLUE', fg='blue', bg='blue', height=\n 5, width=10)\n self.blue['command'] = lambda colour='blue': self.colour(colour)\n self.blue.pack(padx=10, pady=20, side='left')\n self.red = Button(self, text='RED', fg='red', bg='red', height=5,\n width=10)\n self.red['command'] = lambda colour='red': self.colour(colour)\n self.red.pack(padx=10, pady=20, side='left')\n self.green = Button(self, text='GREEN', fg='green', bg='green',\n height=5, width=10)\n self.green['command'] = lambda colour='green': self.colour(colour)\n self.green.pack(padx=10, pady=20, side='left')\n self.none = Button(self, text='UNSURE', fg='white', bg='black',\n height=5, width=10)\n self.none['command'] = lambda colour='unsure': self.colour(colour)\n self.none.pack(padx=10, pady=20, side='left')\n self.quit = Button(self, text='QUIT', fg='red', bg='black', command\n =root.destroy)\n self.quit.pack(padx=20, pady=20)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\nclass Application(Frame):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
99,656 |
e8179e5c011cfae45da5192f832366b22b8b483e
|
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from stevedore import driver
from ceilometer.alarm import service as alarm_service
from ceilometer.openstack.common import log
from ceilometer.openstack.common import service as os_service
from ceilometer import service
OPTS = [
cfg.StrOpt('evaluation_service', default='default',
help='Driver to use for alarm evaluation service. DEPRECATED: '
'"singleton" and "partitioned" alarm evaluator '
'services will be removed in Kilo in favour of the '
'default alarm evaluation service using tooz for '
'partitioning.'),
]
cfg.CONF.register_opts(OPTS, group='alarm')
LOG = log.getLogger(__name__)
def notifier():
service.prepare_service()
os_service.launch(alarm_service.AlarmNotifierService()).wait()
def evaluator():
service.prepare_service()
eval_service_mgr = driver.DriverManager(
"ceilometer.alarm.evaluator_service",
cfg.CONF.alarm.evaluation_service,
invoke_on_load=True)
LOG.debug("Alarm evaluator loaded: %s" %
eval_service_mgr.driver.__class__.__name__)
os_service.launch(eval_service_mgr.driver).wait()
|
[
"# -*- encoding: utf-8 -*-\r\n#\r\n# Copyright 2014 OpenStack Foundation\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\r\n# not use this file except in compliance with the License. You may obtain\r\n# a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\r\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r\n# License for the specific language governing permissions and limitations\r\n# under the License.\r\n\r\nfrom oslo.config import cfg\r\nfrom stevedore import driver\r\n\r\nfrom ceilometer.alarm import service as alarm_service\r\nfrom ceilometer.openstack.common import log\r\nfrom ceilometer.openstack.common import service as os_service\r\nfrom ceilometer import service\r\n\r\n\r\nOPTS = [\r\n cfg.StrOpt('evaluation_service', default='default',\r\n help='Driver to use for alarm evaluation service. DEPRECATED: '\r\n '\"singleton\" and \"partitioned\" alarm evaluator '\r\n 'services will be removed in Kilo in favour of the '\r\n 'default alarm evaluation service using tooz for '\r\n 'partitioning.'),\r\n]\r\n\r\ncfg.CONF.register_opts(OPTS, group='alarm')\r\n\r\nLOG = log.getLogger(__name__)\r\n\r\n\r\ndef notifier():\r\n service.prepare_service()\r\n os_service.launch(alarm_service.AlarmNotifierService()).wait()\r\n\r\n\r\ndef evaluator():\r\n service.prepare_service()\r\n eval_service_mgr = driver.DriverManager(\r\n \"ceilometer.alarm.evaluator_service\",\r\n cfg.CONF.alarm.evaluation_service,\r\n invoke_on_load=True)\r\n LOG.debug(\"Alarm evaluator loaded: %s\" %\r\n eval_service_mgr.driver.__class__.__name__)\r\n os_service.launch(eval_service_mgr.driver).wait()\r\n",
"from oslo.config import cfg\nfrom stevedore import driver\nfrom ceilometer.alarm import service as alarm_service\nfrom ceilometer.openstack.common import log\nfrom ceilometer.openstack.common import service as os_service\nfrom ceilometer import service\nOPTS = [cfg.StrOpt('evaluation_service', default='default', help=\n 'Driver to use for alarm evaluation service. DEPRECATED: \"singleton\" and \"partitioned\" alarm evaluator services will be removed in Kilo in favour of the default alarm evaluation service using tooz for partitioning.'\n )]\ncfg.CONF.register_opts(OPTS, group='alarm')\nLOG = log.getLogger(__name__)\n\n\ndef notifier():\n service.prepare_service()\n os_service.launch(alarm_service.AlarmNotifierService()).wait()\n\n\ndef evaluator():\n service.prepare_service()\n eval_service_mgr = driver.DriverManager(\n 'ceilometer.alarm.evaluator_service', cfg.CONF.alarm.\n evaluation_service, invoke_on_load=True)\n LOG.debug('Alarm evaluator loaded: %s' % eval_service_mgr.driver.\n __class__.__name__)\n os_service.launch(eval_service_mgr.driver).wait()\n",
"<import token>\nOPTS = [cfg.StrOpt('evaluation_service', default='default', help=\n 'Driver to use for alarm evaluation service. DEPRECATED: \"singleton\" and \"partitioned\" alarm evaluator services will be removed in Kilo in favour of the default alarm evaluation service using tooz for partitioning.'\n )]\ncfg.CONF.register_opts(OPTS, group='alarm')\nLOG = log.getLogger(__name__)\n\n\ndef notifier():\n service.prepare_service()\n os_service.launch(alarm_service.AlarmNotifierService()).wait()\n\n\ndef evaluator():\n service.prepare_service()\n eval_service_mgr = driver.DriverManager(\n 'ceilometer.alarm.evaluator_service', cfg.CONF.alarm.\n evaluation_service, invoke_on_load=True)\n LOG.debug('Alarm evaluator loaded: %s' % eval_service_mgr.driver.\n __class__.__name__)\n os_service.launch(eval_service_mgr.driver).wait()\n",
"<import token>\n<assignment token>\ncfg.CONF.register_opts(OPTS, group='alarm')\n<assignment token>\n\n\ndef notifier():\n service.prepare_service()\n os_service.launch(alarm_service.AlarmNotifierService()).wait()\n\n\ndef evaluator():\n service.prepare_service()\n eval_service_mgr = driver.DriverManager(\n 'ceilometer.alarm.evaluator_service', cfg.CONF.alarm.\n evaluation_service, invoke_on_load=True)\n LOG.debug('Alarm evaluator loaded: %s' % eval_service_mgr.driver.\n __class__.__name__)\n os_service.launch(eval_service_mgr.driver).wait()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef notifier():\n service.prepare_service()\n os_service.launch(alarm_service.AlarmNotifierService()).wait()\n\n\ndef evaluator():\n service.prepare_service()\n eval_service_mgr = driver.DriverManager(\n 'ceilometer.alarm.evaluator_service', cfg.CONF.alarm.\n evaluation_service, invoke_on_load=True)\n LOG.debug('Alarm evaluator loaded: %s' % eval_service_mgr.driver.\n __class__.__name__)\n os_service.launch(eval_service_mgr.driver).wait()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef notifier():\n service.prepare_service()\n os_service.launch(alarm_service.AlarmNotifierService()).wait()\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
99,657 |
9ca3dd9e4d200739d0e85bd76cc57ed104bba87e
|
"""productreview URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from review import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
#Authentication
path('signup/', views.signupuser, name='signupuser'),
path('logout/', views.logoutuser, name='logoutuser'),
path('login/', views.loginuser, name='loginuser'),
path('review/<int:review_pk>', views.review, name='review'),
path('profile/<int:profile_pk>', views.profile, name='profile'),
#ProductReview
path('', views.home, name='home'),
path('userpanel/', views.userpanel, name='userpanel'),
path('userpanel/expertreviews/', views.expertreviews, name='expertreviews'),
path('createreview/', views.createreview, name='createreview'),
path('updateprofilephoto/', views.updateprofilephoto, name='updateprofilephoto'),
path('allexperts/', views.allexperts, name='allexperts'),
path('allreviews/', views.allreviews, name='allreviews'),
path('generatecode/', views.generatecode, name='generatecode'),
path('redeemcode/', views.redeemcode, name='redeemcode'),
path('editreview/<int:review_pk>', views.editreview, name='editreview'),
path('update/', views.update, name='update'),
path('updateprofile/', views.updateprofile, name='updateprofile'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"\"\"\"productreview URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom review import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n #Authentication\n path('signup/', views.signupuser, name='signupuser'),\n path('logout/', views.logoutuser, name='logoutuser'),\n path('login/', views.loginuser, name='loginuser'),\n path('review/<int:review_pk>', views.review, name='review'), \n path('profile/<int:profile_pk>', views.profile, name='profile'), \n\n #ProductReview\n path('', views.home, name='home'),\n path('userpanel/', views.userpanel, name='userpanel'),\n path('userpanel/expertreviews/', views.expertreviews, name='expertreviews'),\n path('createreview/', views.createreview, name='createreview'),\n \n path('updateprofilephoto/', views.updateprofilephoto, name='updateprofilephoto'),\n path('allexperts/', views.allexperts, name='allexperts'),\n path('allreviews/', views.allreviews, name='allreviews'),\n path('generatecode/', views.generatecode, name='generatecode'),\n path('redeemcode/', views.redeemcode, name='redeemcode'),\n path('editreview/<int:review_pk>', views.editreview, name='editreview'),\n path('update/', views.update, name='update'),\n path('updateprofile/', views.updateprofile, name='updateprofile'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"<docstring token>\nfrom django.contrib import admin\nfrom django.urls import path\nfrom review import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nurlpatterns = [path('admin/', admin.site.urls), path('signup/', views.\n signupuser, name='signupuser'), path('logout/', views.logoutuser, name=\n 'logoutuser'), path('login/', views.loginuser, name='loginuser'), path(\n 'review/<int:review_pk>', views.review, name='review'), path(\n 'profile/<int:profile_pk>', views.profile, name='profile'), path('',\n views.home, name='home'), path('userpanel/', views.userpanel, name=\n 'userpanel'), path('userpanel/expertreviews/', views.expertreviews,\n name='expertreviews'), path('createreview/', views.createreview, name=\n 'createreview'), path('updateprofilephoto/', views.updateprofilephoto,\n name='updateprofilephoto'), path('allexperts/', views.allexperts, name=\n 'allexperts'), path('allreviews/', views.allreviews, name='allreviews'),\n path('generatecode/', views.generatecode, name='generatecode'), path(\n 'redeemcode/', views.redeemcode, name='redeemcode'), path(\n 'editreview/<int:review_pk>', views.editreview, name='editreview'),\n path('update/', views.update, name='update'), path('updateprofile/',\n views.updateprofile, name='updateprofile')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"<docstring token>\n<import token>\nurlpatterns = [path('admin/', admin.site.urls), path('signup/', views.\n signupuser, name='signupuser'), path('logout/', views.logoutuser, name=\n 'logoutuser'), path('login/', views.loginuser, name='loginuser'), path(\n 'review/<int:review_pk>', views.review, name='review'), path(\n 'profile/<int:profile_pk>', views.profile, name='profile'), path('',\n views.home, name='home'), path('userpanel/', views.userpanel, name=\n 'userpanel'), path('userpanel/expertreviews/', views.expertreviews,\n name='expertreviews'), path('createreview/', views.createreview, name=\n 'createreview'), path('updateprofilephoto/', views.updateprofilephoto,\n name='updateprofilephoto'), path('allexperts/', views.allexperts, name=\n 'allexperts'), path('allreviews/', views.allreviews, name='allreviews'),\n path('generatecode/', views.generatecode, name='generatecode'), path(\n 'redeemcode/', views.redeemcode, name='redeemcode'), path(\n 'editreview/<int:review_pk>', views.editreview, name='editreview'),\n path('update/', views.update, name='update'), path('updateprofile/',\n views.updateprofile, name='updateprofile')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"<docstring token>\n<import token>\n<assignment token>\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
99,658 |
0743713da3c4a63bb0f73fceebcabd6bd52c6001
|
from typing import List
import bitstring
from ops import KEY_CKR_DATA, KEY_CKR_CONTROL
from program import Channel, FPGA, Program
CKS_TARGET_QSFP = 0
CKS_TARGET_CKR = 1
class NoRouteFound(BaseException):
pass
def closest_path_to_fpga(paths, channel: Channel, target: FPGA):
routes = paths[channel]
connections = []
for destination in routes:
if destination.fpga == target:
connections.append(routes[destination])
if not connections:
raise NoRouteFound("No route found from {} to {}".format(channel, target))
return min(connections, key=lambda c: len(c))
def get_output_target(paths, channel: Channel, target: FPGA):
"""
0 -> local QSFP
1 -> CK_R
2 -> first neighbour
3 -> second neighbour
4 -> ...
"""
if target == channel.fpga:
return CKS_TARGET_CKR
path = closest_path_to_fpga(paths, channel, target)[1:] # skip the channel itself
if path[0].fpga == channel.fpga:
return 2 + channel.target_index(path[0].index)
else:
return CKS_TARGET_QSFP
def cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) -> List[int]:
table = []
for fpga in fpgas:
target = get_output_target(paths, channel, fpga)
table.append(target)
return table
def get_input_target(channel: Channel, logical_port: int, program: Program,
channels_per_fpga: int, key) -> int:
"""
0 -> local CK_S (never generated here)
1 -> CK_R_0
2 -> CK_R_1
...
[channels_per_fpga - 1] -> CK_R_N-1
N -> first hardware port assigned to the given channel
N + 1 -> second hardware port assigned to the given channel
"""
target_channel_index = program.get_channel_for_port_key(logical_port, key)
if target_channel_index is None:
return 0
if target_channel_index != channel.index:
return 1 + channel.target_index(target_channel_index)
allocations = tuple((op.logical_port, key) for (op, key)
in program.get_channel_allocations_with_prefix(channel.index, "ckr"))
return channels_per_fpga + allocations.index((logical_port, key))
def ckr_routing_table(channel: Channel, channels_per_fpga: int, program: Program) -> List[int]:
table = []
for port in range(program.logical_port_count):
table.append(get_input_target(channel, port, program, channels_per_fpga, KEY_CKR_DATA))
table.append(get_input_target(channel, port, program, channels_per_fpga, KEY_CKR_CONTROL))
return table
def serialize_to_array(table: List[int], bytes=1):
stream = bitstring.BitStream()
bitcount = bytes * 8
for target in table:
stream.append("uintle:{}={}".format(bitcount, target))
return stream.bytes
|
[
"from typing import List\n\nimport bitstring\n\nfrom ops import KEY_CKR_DATA, KEY_CKR_CONTROL\nfrom program import Channel, FPGA, Program\n\nCKS_TARGET_QSFP = 0\nCKS_TARGET_CKR = 1\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\ndef closest_path_to_fpga(paths, channel: Channel, target: FPGA):\n routes = paths[channel]\n connections = []\n for destination in routes:\n if destination.fpga == target:\n connections.append(routes[destination])\n\n if not connections:\n raise NoRouteFound(\"No route found from {} to {}\".format(channel, target))\n return min(connections, key=lambda c: len(c))\n\n\ndef get_output_target(paths, channel: Channel, target: FPGA):\n \"\"\"\n 0 -> local QSFP\n 1 -> CK_R\n 2 -> first neighbour\n 3 -> second neighbour\n 4 -> ...\n \"\"\"\n if target == channel.fpga:\n return CKS_TARGET_CKR\n\n path = closest_path_to_fpga(paths, channel, target)[1:] # skip the channel itself\n if path[0].fpga == channel.fpga:\n return 2 + channel.target_index(path[0].index)\n else:\n return CKS_TARGET_QSFP\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) -> List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\ndef get_input_target(channel: Channel, logical_port: int, program: Program,\n channels_per_fpga: int, key) -> int:\n \"\"\"\n 0 -> local CK_S (never generated here)\n 1 -> CK_R_0\n 2 -> CK_R_1\n ...\n [channels_per_fpga - 1] -> CK_R_N-1\n N -> first hardware port assigned to the given channel\n N + 1 -> second hardware port assigned to the given channel\n \"\"\"\n\n target_channel_index = program.get_channel_for_port_key(logical_port, key)\n if target_channel_index is None:\n return 0\n if target_channel_index != channel.index:\n return 1 + channel.target_index(target_channel_index)\n\n allocations = tuple((op.logical_port, key) for (op, key)\n in program.get_channel_allocations_with_prefix(channel.index, \"ckr\"))\n return channels_per_fpga + allocations.index((logical_port, key))\n\n\ndef ckr_routing_table(channel: Channel, channels_per_fpga: int, program: Program) -> List[int]:\n table = []\n for port in range(program.logical_port_count):\n table.append(get_input_target(channel, port, program, channels_per_fpga, KEY_CKR_DATA))\n table.append(get_input_target(channel, port, program, channels_per_fpga, KEY_CKR_CONTROL))\n return table\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append(\"uintle:{}={}\".format(bitcount, target))\n return stream.bytes\n",
"from typing import List\nimport bitstring\nfrom ops import KEY_CKR_DATA, KEY_CKR_CONTROL\nfrom program import Channel, FPGA, Program\nCKS_TARGET_QSFP = 0\nCKS_TARGET_CKR = 1\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\ndef closest_path_to_fpga(paths, channel: Channel, target: FPGA):\n routes = paths[channel]\n connections = []\n for destination in routes:\n if destination.fpga == target:\n connections.append(routes[destination])\n if not connections:\n raise NoRouteFound('No route found from {} to {}'.format(channel,\n target))\n return min(connections, key=lambda c: len(c))\n\n\ndef get_output_target(paths, channel: Channel, target: FPGA):\n \"\"\"\n 0 -> local QSFP\n 1 -> CK_R\n 2 -> first neighbour\n 3 -> second neighbour\n 4 -> ...\n \"\"\"\n if target == channel.fpga:\n return CKS_TARGET_CKR\n path = closest_path_to_fpga(paths, channel, target)[1:]\n if path[0].fpga == channel.fpga:\n return 2 + channel.target_index(path[0].index)\n else:\n return CKS_TARGET_QSFP\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) ->List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\ndef get_input_target(channel: Channel, logical_port: int, program: Program,\n channels_per_fpga: int, key) ->int:\n \"\"\"\n 0 -> local CK_S (never generated here)\n 1 -> CK_R_0\n 2 -> CK_R_1\n ...\n [channels_per_fpga - 1] -> CK_R_N-1\n N -> first hardware port assigned to the given channel\n N + 1 -> second hardware port assigned to the given channel\n \"\"\"\n target_channel_index = program.get_channel_for_port_key(logical_port, key)\n if target_channel_index is None:\n return 0\n if target_channel_index != channel.index:\n return 1 + channel.target_index(target_channel_index)\n allocations = tuple((op.logical_port, key) for op, key in program.\n get_channel_allocations_with_prefix(channel.index, 'ckr'))\n return channels_per_fpga + allocations.index((logical_port, key))\n\n\ndef ckr_routing_table(channel: Channel, channels_per_fpga: int, program:\n Program) ->List[int]:\n table = []\n for port in range(program.logical_port_count):\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_DATA))\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_CONTROL))\n return table\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\nCKS_TARGET_QSFP = 0\nCKS_TARGET_CKR = 1\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\ndef closest_path_to_fpga(paths, channel: Channel, target: FPGA):\n routes = paths[channel]\n connections = []\n for destination in routes:\n if destination.fpga == target:\n connections.append(routes[destination])\n if not connections:\n raise NoRouteFound('No route found from {} to {}'.format(channel,\n target))\n return min(connections, key=lambda c: len(c))\n\n\ndef get_output_target(paths, channel: Channel, target: FPGA):\n \"\"\"\n 0 -> local QSFP\n 1 -> CK_R\n 2 -> first neighbour\n 3 -> second neighbour\n 4 -> ...\n \"\"\"\n if target == channel.fpga:\n return CKS_TARGET_CKR\n path = closest_path_to_fpga(paths, channel, target)[1:]\n if path[0].fpga == channel.fpga:\n return 2 + channel.target_index(path[0].index)\n else:\n return CKS_TARGET_QSFP\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) ->List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\ndef get_input_target(channel: Channel, logical_port: int, program: Program,\n channels_per_fpga: int, key) ->int:\n \"\"\"\n 0 -> local CK_S (never generated here)\n 1 -> CK_R_0\n 2 -> CK_R_1\n ...\n [channels_per_fpga - 1] -> CK_R_N-1\n N -> first hardware port assigned to the given channel\n N + 1 -> second hardware port assigned to the given channel\n \"\"\"\n target_channel_index = program.get_channel_for_port_key(logical_port, key)\n if target_channel_index is None:\n return 0\n if target_channel_index != channel.index:\n return 1 + channel.target_index(target_channel_index)\n allocations = tuple((op.logical_port, key) for op, key in program.\n get_channel_allocations_with_prefix(channel.index, 'ckr'))\n return channels_per_fpga + allocations.index((logical_port, key))\n\n\ndef ckr_routing_table(channel: Channel, channels_per_fpga: int, program:\n Program) ->List[int]:\n table = []\n for port in range(program.logical_port_count):\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_DATA))\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_CONTROL))\n return table\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\n<assignment token>\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\ndef closest_path_to_fpga(paths, channel: Channel, target: FPGA):\n routes = paths[channel]\n connections = []\n for destination in routes:\n if destination.fpga == target:\n connections.append(routes[destination])\n if not connections:\n raise NoRouteFound('No route found from {} to {}'.format(channel,\n target))\n return min(connections, key=lambda c: len(c))\n\n\ndef get_output_target(paths, channel: Channel, target: FPGA):\n \"\"\"\n 0 -> local QSFP\n 1 -> CK_R\n 2 -> first neighbour\n 3 -> second neighbour\n 4 -> ...\n \"\"\"\n if target == channel.fpga:\n return CKS_TARGET_CKR\n path = closest_path_to_fpga(paths, channel, target)[1:]\n if path[0].fpga == channel.fpga:\n return 2 + channel.target_index(path[0].index)\n else:\n return CKS_TARGET_QSFP\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) ->List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\ndef get_input_target(channel: Channel, logical_port: int, program: Program,\n channels_per_fpga: int, key) ->int:\n \"\"\"\n 0 -> local CK_S (never generated here)\n 1 -> CK_R_0\n 2 -> CK_R_1\n ...\n [channels_per_fpga - 1] -> CK_R_N-1\n N -> first hardware port assigned to the given channel\n N + 1 -> second hardware port assigned to the given channel\n \"\"\"\n target_channel_index = program.get_channel_for_port_key(logical_port, key)\n if target_channel_index is None:\n return 0\n if target_channel_index != channel.index:\n return 1 + channel.target_index(target_channel_index)\n allocations = tuple((op.logical_port, key) for op, key in program.\n get_channel_allocations_with_prefix(channel.index, 'ckr'))\n return channels_per_fpga + allocations.index((logical_port, key))\n\n\ndef ckr_routing_table(channel: Channel, channels_per_fpga: int, program:\n Program) ->List[int]:\n table = []\n for port in range(program.logical_port_count):\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_DATA))\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_CONTROL))\n return table\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\n<assignment token>\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\ndef closest_path_to_fpga(paths, channel: Channel, target: FPGA):\n routes = paths[channel]\n connections = []\n for destination in routes:\n if destination.fpga == target:\n connections.append(routes[destination])\n if not connections:\n raise NoRouteFound('No route found from {} to {}'.format(channel,\n target))\n return min(connections, key=lambda c: len(c))\n\n\n<function token>\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) ->List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\ndef get_input_target(channel: Channel, logical_port: int, program: Program,\n channels_per_fpga: int, key) ->int:\n \"\"\"\n 0 -> local CK_S (never generated here)\n 1 -> CK_R_0\n 2 -> CK_R_1\n ...\n [channels_per_fpga - 1] -> CK_R_N-1\n N -> first hardware port assigned to the given channel\n N + 1 -> second hardware port assigned to the given channel\n \"\"\"\n target_channel_index = program.get_channel_for_port_key(logical_port, key)\n if target_channel_index is None:\n return 0\n if target_channel_index != channel.index:\n return 1 + channel.target_index(target_channel_index)\n allocations = tuple((op.logical_port, key) for op, key in program.\n get_channel_allocations_with_prefix(channel.index, 'ckr'))\n return channels_per_fpga + allocations.index((logical_port, key))\n\n\ndef ckr_routing_table(channel: Channel, channels_per_fpga: int, program:\n Program) ->List[int]:\n table = []\n for port in range(program.logical_port_count):\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_DATA))\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_CONTROL))\n return table\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\n<assignment token>\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\n<function token>\n<function token>\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) ->List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\ndef get_input_target(channel: Channel, logical_port: int, program: Program,\n channels_per_fpga: int, key) ->int:\n \"\"\"\n 0 -> local CK_S (never generated here)\n 1 -> CK_R_0\n 2 -> CK_R_1\n ...\n [channels_per_fpga - 1] -> CK_R_N-1\n N -> first hardware port assigned to the given channel\n N + 1 -> second hardware port assigned to the given channel\n \"\"\"\n target_channel_index = program.get_channel_for_port_key(logical_port, key)\n if target_channel_index is None:\n return 0\n if target_channel_index != channel.index:\n return 1 + channel.target_index(target_channel_index)\n allocations = tuple((op.logical_port, key) for op, key in program.\n get_channel_allocations_with_prefix(channel.index, 'ckr'))\n return channels_per_fpga + allocations.index((logical_port, key))\n\n\ndef ckr_routing_table(channel: Channel, channels_per_fpga: int, program:\n Program) ->List[int]:\n table = []\n for port in range(program.logical_port_count):\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_DATA))\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_CONTROL))\n return table\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\n<assignment token>\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\n<function token>\n<function token>\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) ->List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\n<function token>\n\n\ndef ckr_routing_table(channel: Channel, channels_per_fpga: int, program:\n Program) ->List[int]:\n table = []\n for port in range(program.logical_port_count):\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_DATA))\n table.append(get_input_target(channel, port, program,\n channels_per_fpga, KEY_CKR_CONTROL))\n return table\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\n<assignment token>\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\n<function token>\n<function token>\n\n\ndef cks_routing_table(paths, fpgas: List[FPGA], channel: Channel) ->List[int]:\n table = []\n for fpga in fpgas:\n target = get_output_target(paths, channel, fpga)\n table.append(target)\n return table\n\n\n<function token>\n<function token>\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\n<assignment token>\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef serialize_to_array(table: List[int], bytes=1):\n stream = bitstring.BitStream()\n bitcount = bytes * 8\n for target in table:\n stream.append('uintle:{}={}'.format(bitcount, target))\n return stream.bytes\n",
"<import token>\n<assignment token>\n\n\nclass NoRouteFound(BaseException):\n pass\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,659 |
85bee8cc7328b124f26112024247d8e0222d966c
|
#!/usr/bin/python3.7
import numpy
lines = [line for line in open('input.txt')]
#test
lines.append("3,9,8,9,10,9,4,9,99,-1,8") # input == 8
lines.append("3,3,1108,-1,8,3,4,3,99") # input == 8
lines.append("3,9,7,9,10,9,4,9,99,-1,8") # input < 8
lines.append("3,3,1107,-1,8,3,4,3,99") # input < 8
lines.append("3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9") # print 0 on zero or 1 on non-zero
lines.append("3,3,1105,-1,9,1101,0,0,12,4,12,99,1") # print 0 on zero or 1 on non-zero
# 999 -> <8, 1000 -> ==8, 1001 -> >8
lines.append("3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,"
"999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99")
prog = lines[0].split(',')
prog = numpy.array(list(map(int, prog)))
def instr_len(op_code: int):
if op_code > 99:
rev_param = str(op_code).zfill(5)[::-1]
op_code = int(rev_param[1] + rev_param[0])
return {
1: 4,
2: 4,
3: 2,
4: 2,
5: 3,
6: 3,
7: 4,
8: 4
}.get(op_code)
def resolve_parameter_mode(prog: list, param: int, *args):
if param < 100:
nested_params = [prog[a:a + 1] for a in args]
return [param, *nested_params]
rev_param = str(param).zfill(5)[::-1]
op_code = int(rev_param[1] + rev_param[0])
arg_param = list(rev_param[2:])
resolved_params = []
args = list(args)
if args:
for i, is_immediate in enumerate(arg_param):
if bool(int(is_immediate)):
resolved_params.append([args[i]])
elif len(args) > i:
resolved_params.append(prog[args[i]:args[i] + 1])
return [op_code, *resolved_params]
def operate(wp: int, ops_code: int, *args, ) -> int:
if ops_code == 1:
args[2][0] = args[0][0] + args[1][0]
elif ops_code == 2:
args[2][0] = args[0][0] * args[1][0]
elif ops_code == 3:
args[0][0] = int(input("Enter input args: "))
# args[0][0] = 1
elif ops_code == 4:
print(args[0][0])
elif ops_code == 5:
if args[0][0] != 0:
return args[1][0]
elif ops_code == 6:
if args[0][0] == 0:
return args[1][0]
elif ops_code == 7:
args[2][0] = 1 if args[0][0] < args[1][0] else 0
elif ops_code == 8:
args[2][0] = 1 if args[0][0] == args[1][0] else 0
return wp + instr_len(ops_code)
def process(_prog):
wp = 0
p = _prog.copy()
while True:
op_code = p[wp]
if op_code == 99:
break
op_len = instr_len(p[wp])
instruction = p[wp:op_len + wp]
resolved_instruction = resolve_parameter_mode(p, *instruction)
wp = operate(wp, *resolved_instruction)
if wp > len(p):
break
# 6627023
process(prog)
# for i in range(0, 99):
# for j in range(0, 99):
# if process(prog, i, j) == 19690720:
# print(f"found it:{i},{j}")
# sys.exit()
|
[
"#!/usr/bin/python3.7\n\nimport numpy\n\nlines = [line for line in open('input.txt')]\n#test\nlines.append(\"3,9,8,9,10,9,4,9,99,-1,8\") # input == 8\nlines.append(\"3,3,1108,-1,8,3,4,3,99\") # input == 8\nlines.append(\"3,9,7,9,10,9,4,9,99,-1,8\") # input < 8\nlines.append(\"3,3,1107,-1,8,3,4,3,99\") # input < 8\nlines.append(\"3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9\") # print 0 on zero or 1 on non-zero\nlines.append(\"3,3,1105,-1,9,1101,0,0,12,4,12,99,1\") # print 0 on zero or 1 on non-zero\n# 999 -> <8, 1000 -> ==8, 1001 -> >8\nlines.append(\"3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,\"\n \"999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99\")\n\nprog = lines[0].split(',')\nprog = numpy.array(list(map(int, prog)))\n\n\ndef instr_len(op_code: int):\n if op_code > 99:\n rev_param = str(op_code).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n\n return {\n 1: 4,\n 2: 4,\n 3: 2,\n 4: 2,\n 5: 3,\n 6: 3,\n 7: 4,\n 8: 4\n }.get(op_code)\n\n\ndef resolve_parameter_mode(prog: list, param: int, *args):\n if param < 100:\n nested_params = [prog[a:a + 1] for a in args]\n return [param, *nested_params]\n\n rev_param = str(param).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n arg_param = list(rev_param[2:])\n resolved_params = []\n args = list(args)\n if args:\n for i, is_immediate in enumerate(arg_param):\n if bool(int(is_immediate)):\n resolved_params.append([args[i]])\n elif len(args) > i:\n resolved_params.append(prog[args[i]:args[i] + 1])\n\n return [op_code, *resolved_params]\n\n\ndef operate(wp: int, ops_code: int, *args, ) -> int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input(\"Enter input args: \"))\n # args[0][0] = 1\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n\n return wp + instr_len(ops_code)\n\n\ndef process(_prog):\n wp = 0\n p = _prog.copy()\n\n while True:\n op_code = p[wp]\n if op_code == 99:\n break\n\n op_len = instr_len(p[wp])\n\n instruction = p[wp:op_len + wp]\n resolved_instruction = resolve_parameter_mode(p, *instruction)\n\n wp = operate(wp, *resolved_instruction)\n\n if wp > len(p):\n break\n\n\n# 6627023\n\nprocess(prog)\n\n# for i in range(0, 99):\n# for j in range(0, 99):\n# if process(prog, i, j) == 19690720:\n# print(f\"found it:{i},{j}\")\n# sys.exit()\n",
"import numpy\nlines = [line for line in open('input.txt')]\nlines.append('3,9,8,9,10,9,4,9,99,-1,8')\nlines.append('3,3,1108,-1,8,3,4,3,99')\nlines.append('3,9,7,9,10,9,4,9,99,-1,8')\nlines.append('3,3,1107,-1,8,3,4,3,99')\nlines.append('3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9')\nlines.append('3,3,1105,-1,9,1101,0,0,12,4,12,99,1')\nlines.append(\n '3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99'\n )\nprog = lines[0].split(',')\nprog = numpy.array(list(map(int, prog)))\n\n\ndef instr_len(op_code: int):\n if op_code > 99:\n rev_param = str(op_code).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n return {(1): 4, (2): 4, (3): 2, (4): 2, (5): 3, (6): 3, (7): 4, (8): 4\n }.get(op_code)\n\n\ndef resolve_parameter_mode(prog: list, param: int, *args):\n if param < 100:\n nested_params = [prog[a:a + 1] for a in args]\n return [param, *nested_params]\n rev_param = str(param).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n arg_param = list(rev_param[2:])\n resolved_params = []\n args = list(args)\n if args:\n for i, is_immediate in enumerate(arg_param):\n if bool(int(is_immediate)):\n resolved_params.append([args[i]])\n elif len(args) > i:\n resolved_params.append(prog[args[i]:args[i] + 1])\n return [op_code, *resolved_params]\n\n\ndef operate(wp: int, ops_code: int, *args) ->int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input('Enter input args: '))\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n return wp + instr_len(ops_code)\n\n\ndef process(_prog):\n wp = 0\n p = _prog.copy()\n while True:\n op_code = p[wp]\n if op_code == 99:\n break\n op_len = instr_len(p[wp])\n instruction = p[wp:op_len + wp]\n resolved_instruction = resolve_parameter_mode(p, *instruction)\n wp = operate(wp, *resolved_instruction)\n if wp > len(p):\n break\n\n\nprocess(prog)\n",
"<import token>\nlines = [line for line in open('input.txt')]\nlines.append('3,9,8,9,10,9,4,9,99,-1,8')\nlines.append('3,3,1108,-1,8,3,4,3,99')\nlines.append('3,9,7,9,10,9,4,9,99,-1,8')\nlines.append('3,3,1107,-1,8,3,4,3,99')\nlines.append('3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9')\nlines.append('3,3,1105,-1,9,1101,0,0,12,4,12,99,1')\nlines.append(\n '3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99'\n )\nprog = lines[0].split(',')\nprog = numpy.array(list(map(int, prog)))\n\n\ndef instr_len(op_code: int):\n if op_code > 99:\n rev_param = str(op_code).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n return {(1): 4, (2): 4, (3): 2, (4): 2, (5): 3, (6): 3, (7): 4, (8): 4\n }.get(op_code)\n\n\ndef resolve_parameter_mode(prog: list, param: int, *args):\n if param < 100:\n nested_params = [prog[a:a + 1] for a in args]\n return [param, *nested_params]\n rev_param = str(param).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n arg_param = list(rev_param[2:])\n resolved_params = []\n args = list(args)\n if args:\n for i, is_immediate in enumerate(arg_param):\n if bool(int(is_immediate)):\n resolved_params.append([args[i]])\n elif len(args) > i:\n resolved_params.append(prog[args[i]:args[i] + 1])\n return [op_code, *resolved_params]\n\n\ndef operate(wp: int, ops_code: int, *args) ->int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input('Enter input args: '))\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n return wp + instr_len(ops_code)\n\n\ndef process(_prog):\n wp = 0\n p = _prog.copy()\n while True:\n op_code = p[wp]\n if op_code == 99:\n break\n op_len = instr_len(p[wp])\n instruction = p[wp:op_len + wp]\n resolved_instruction = resolve_parameter_mode(p, *instruction)\n wp = operate(wp, *resolved_instruction)\n if wp > len(p):\n break\n\n\nprocess(prog)\n",
"<import token>\n<assignment token>\nlines.append('3,9,8,9,10,9,4,9,99,-1,8')\nlines.append('3,3,1108,-1,8,3,4,3,99')\nlines.append('3,9,7,9,10,9,4,9,99,-1,8')\nlines.append('3,3,1107,-1,8,3,4,3,99')\nlines.append('3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9')\nlines.append('3,3,1105,-1,9,1101,0,0,12,4,12,99,1')\nlines.append(\n '3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99'\n )\n<assignment token>\n\n\ndef instr_len(op_code: int):\n if op_code > 99:\n rev_param = str(op_code).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n return {(1): 4, (2): 4, (3): 2, (4): 2, (5): 3, (6): 3, (7): 4, (8): 4\n }.get(op_code)\n\n\ndef resolve_parameter_mode(prog: list, param: int, *args):\n if param < 100:\n nested_params = [prog[a:a + 1] for a in args]\n return [param, *nested_params]\n rev_param = str(param).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n arg_param = list(rev_param[2:])\n resolved_params = []\n args = list(args)\n if args:\n for i, is_immediate in enumerate(arg_param):\n if bool(int(is_immediate)):\n resolved_params.append([args[i]])\n elif len(args) > i:\n resolved_params.append(prog[args[i]:args[i] + 1])\n return [op_code, *resolved_params]\n\n\ndef operate(wp: int, ops_code: int, *args) ->int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input('Enter input args: '))\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n return wp + instr_len(ops_code)\n\n\ndef process(_prog):\n wp = 0\n p = _prog.copy()\n while True:\n op_code = p[wp]\n if op_code == 99:\n break\n op_len = instr_len(p[wp])\n instruction = p[wp:op_len + wp]\n resolved_instruction = resolve_parameter_mode(p, *instruction)\n wp = operate(wp, *resolved_instruction)\n if wp > len(p):\n break\n\n\nprocess(prog)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef instr_len(op_code: int):\n if op_code > 99:\n rev_param = str(op_code).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n return {(1): 4, (2): 4, (3): 2, (4): 2, (5): 3, (6): 3, (7): 4, (8): 4\n }.get(op_code)\n\n\ndef resolve_parameter_mode(prog: list, param: int, *args):\n if param < 100:\n nested_params = [prog[a:a + 1] for a in args]\n return [param, *nested_params]\n rev_param = str(param).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n arg_param = list(rev_param[2:])\n resolved_params = []\n args = list(args)\n if args:\n for i, is_immediate in enumerate(arg_param):\n if bool(int(is_immediate)):\n resolved_params.append([args[i]])\n elif len(args) > i:\n resolved_params.append(prog[args[i]:args[i] + 1])\n return [op_code, *resolved_params]\n\n\ndef operate(wp: int, ops_code: int, *args) ->int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input('Enter input args: '))\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n return wp + instr_len(ops_code)\n\n\ndef process(_prog):\n wp = 0\n p = _prog.copy()\n while True:\n op_code = p[wp]\n if op_code == 99:\n break\n op_len = instr_len(p[wp])\n instruction = p[wp:op_len + wp]\n resolved_instruction = resolve_parameter_mode(p, *instruction)\n wp = operate(wp, *resolved_instruction)\n if wp > len(p):\n break\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef instr_len(op_code: int):\n if op_code > 99:\n rev_param = str(op_code).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n return {(1): 4, (2): 4, (3): 2, (4): 2, (5): 3, (6): 3, (7): 4, (8): 4\n }.get(op_code)\n\n\ndef resolve_parameter_mode(prog: list, param: int, *args):\n if param < 100:\n nested_params = [prog[a:a + 1] for a in args]\n return [param, *nested_params]\n rev_param = str(param).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n arg_param = list(rev_param[2:])\n resolved_params = []\n args = list(args)\n if args:\n for i, is_immediate in enumerate(arg_param):\n if bool(int(is_immediate)):\n resolved_params.append([args[i]])\n elif len(args) > i:\n resolved_params.append(prog[args[i]:args[i] + 1])\n return [op_code, *resolved_params]\n\n\ndef operate(wp: int, ops_code: int, *args) ->int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input('Enter input args: '))\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n return wp + instr_len(ops_code)\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef resolve_parameter_mode(prog: list, param: int, *args):\n if param < 100:\n nested_params = [prog[a:a + 1] for a in args]\n return [param, *nested_params]\n rev_param = str(param).zfill(5)[::-1]\n op_code = int(rev_param[1] + rev_param[0])\n arg_param = list(rev_param[2:])\n resolved_params = []\n args = list(args)\n if args:\n for i, is_immediate in enumerate(arg_param):\n if bool(int(is_immediate)):\n resolved_params.append([args[i]])\n elif len(args) > i:\n resolved_params.append(prog[args[i]:args[i] + 1])\n return [op_code, *resolved_params]\n\n\ndef operate(wp: int, ops_code: int, *args) ->int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input('Enter input args: '))\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n return wp + instr_len(ops_code)\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef operate(wp: int, ops_code: int, *args) ->int:\n if ops_code == 1:\n args[2][0] = args[0][0] + args[1][0]\n elif ops_code == 2:\n args[2][0] = args[0][0] * args[1][0]\n elif ops_code == 3:\n args[0][0] = int(input('Enter input args: '))\n elif ops_code == 4:\n print(args[0][0])\n elif ops_code == 5:\n if args[0][0] != 0:\n return args[1][0]\n elif ops_code == 6:\n if args[0][0] == 0:\n return args[1][0]\n elif ops_code == 7:\n args[2][0] = 1 if args[0][0] < args[1][0] else 0\n elif ops_code == 8:\n args[2][0] = 1 if args[0][0] == args[1][0] else 0\n return wp + instr_len(ops_code)\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,660 |
8867ac6b95c571226163227e1650ef8e03708b1f
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
h = head
while h:
tmp = h.next
if tmp is None:
break
while tmp and tmp.val == h.val:
h.next = tmp.next
tmp.next = None
tmp = h.next
h = h.next
return head
|
[
"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n h = head\n while h:\n \n tmp = h.next\n if tmp is None:\n break\n while tmp and tmp.val == h.val:\n h.next = tmp.next\n tmp.next = None\n tmp = h.next\n h = h.next\n return head",
"class Solution(object):\n\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n h = head\n while h:\n tmp = h.next\n if tmp is None:\n break\n while tmp and tmp.val == h.val:\n h.next = tmp.next\n tmp.next = None\n tmp = h.next\n h = h.next\n return head\n",
"class Solution(object):\n <function token>\n",
"<class token>\n"
] | false |
99,661 |
fcbb6dc82360d327a643ced681efb0910e5b920a
|
import sys, os
from jnius import autoclass
print 'CLASSPATH: ', os.environ['CLASSPATH']
Thing = autoclass('Thing')
|
[
"import sys, os\nfrom jnius import autoclass\n\nprint 'CLASSPATH: ', os.environ['CLASSPATH']\n\nThing = autoclass('Thing')\n"
] | true |
99,662 |
af82cbf289f607dbf9d8d93586fb56c3ee4ee9dd
|
ITEM: TIMESTEP
1500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
3.7179638144627702e-01 4.6828203618547612e+01
3.7179638144627702e-01 4.6828203618547612e+01
3.7179638144627702e-01 4.6828203618547612e+01
ITEM: ATOMS id type xs ys zs
8 1 0.122506 0.0616676 0.0702085
35 1 0.0673125 0.126372 0.0590109
130 1 0.066038 0.0664378 0.128403
165 1 0.131273 0.119179 0.132273
3 1 0.0759593 0.994963 0.0580912
133 1 0.130252 0.997937 0.135408
20 1 0.490006 0.0658088 0.0693352
449 1 0.00177477 0.247615 0.373689
12 1 0.25666 0.0558656 0.0598576
39 1 0.200559 0.126113 0.0712715
43 1 0.306891 0.126318 0.0602848
134 1 0.191536 0.0594373 0.123808
138 1 0.307313 0.0650693 0.122444
169 1 0.248513 0.123428 0.139504
1649 1 0.49553 0.876242 0.5014
1033 1 0.244958 0.496979 0.00455496
1265 1 0.49861 0.883951 0.126467
16 1 0.37903 0.0651308 0.0608996
47 1 0.440855 0.128591 0.0705798
142 1 0.431912 0.0666677 0.139264
173 1 0.364353 0.122605 0.121545
15 1 0.434522 0.00514585 0.0698344
141 1 0.369629 0.00770925 0.12703
42 1 0.308197 0.190027 0.00257871
397 1 0.374937 0.991822 0.37703
1220 1 0.00339772 0.805224 0.181201
40 1 0.131754 0.185106 0.0691189
67 1 0.0770215 0.244332 0.0608682
72 1 0.130189 0.317184 0.0640843
162 1 0.0685323 0.193806 0.123728
194 1 0.0541531 0.307424 0.116713
197 1 0.127741 0.249351 0.130472
13 1 0.385449 0.00480166 0.00084798
1236 1 0.494815 0.807684 0.185551
399 1 0.442251 0.99691 0.438793
1457 1 0.489891 0.619199 0.372224
1577 1 0.254908 0.620826 0.496723
404 1 0.50114 0.0578598 0.43443
44 1 0.253758 0.195456 0.0686689
71 1 0.187097 0.253985 0.0626851
75 1 0.307258 0.252861 0.0633578
76 1 0.240451 0.314502 0.0653119
166 1 0.189914 0.183752 0.132939
170 1 0.309328 0.189348 0.129023
198 1 0.182471 0.319167 0.123029
201 1 0.241093 0.257213 0.135788
202 1 0.302909 0.307447 0.126464
500 1 0.49646 0.441037 0.433168
1546 1 0.311966 0.562394 0.49584
48 1 0.367614 0.191715 0.0654884
79 1 0.429687 0.247514 0.0699353
80 1 0.37849 0.31252 0.0658664
174 1 0.434668 0.184594 0.128757
205 1 0.369058 0.246982 0.128743
206 1 0.440055 0.309684 0.123238
1518 1 0.437677 0.936612 0.369773
1520 1 0.382322 0.935025 0.437416
244 1 0.493123 0.43251 0.181315
9 1 0.240378 0.987569 0.0125905
99 1 0.0571992 0.374845 0.0571279
104 1 0.115896 0.440157 0.0566418
226 1 0.0529092 0.437446 0.127062
229 1 0.113658 0.382261 0.120713
1517 1 0.380158 0.876227 0.375905
66 1 0.0694349 0.306826 8.01478e-05
1519 1 0.440738 0.876592 0.435602
289 1 0.0067298 0.127927 0.248886
103 1 0.183569 0.37911 0.0632626
107 1 0.304556 0.368928 0.0599431
108 1 0.24758 0.43403 0.0680997
230 1 0.184907 0.440628 0.122798
233 1 0.247622 0.374682 0.126779
234 1 0.305484 0.439598 0.131699
1031 1 0.180425 0.500208 0.0595709
1097 1 0.245977 0.747062 0.00656892
14 1 0.442944 0.0699623 0.000723564
1172 1 0.494855 0.548594 0.190751
111 1 0.433877 0.376366 0.0632987
112 1 0.372655 0.430348 0.0608133
237 1 0.372039 0.377706 0.124139
238 1 0.437158 0.437419 0.12482
388 1 0.0033361 0.0587117 0.439143
38 1 0.188293 0.185957 0.00163708
417 1 0.00198284 0.125831 0.385084
1027 1 0.0547332 0.511491 0.0684469
1521 1 0.494549 0.872195 0.367588
1032 1 0.125793 0.55813 0.0616619
1059 1 0.069085 0.613502 0.0603673
1154 1 0.060225 0.564065 0.13549
1189 1 0.12007 0.623865 0.137432
1157 1 0.119343 0.49181 0.123964
1036 1 0.251455 0.55932 0.0657171
1063 1 0.173034 0.630658 0.0766565
1067 1 0.308758 0.624548 0.0658114
1158 1 0.180413 0.554769 0.125694
1162 1 0.31368 0.561652 0.122002
1193 1 0.253549 0.634747 0.126577
1035 1 0.311738 0.494827 0.0617163
1161 1 0.245696 0.505242 0.129213
1473 1 0.00857143 0.743087 0.381099
1040 1 0.370854 0.555958 0.0601095
1071 1 0.442268 0.62859 0.0608984
1166 1 0.434265 0.562089 0.128014
1197 1 0.377975 0.632047 0.122107
1165 1 0.370212 0.490963 0.121175
1281 1 0.00152018 0.501231 0.24923
1039 1 0.431952 0.496487 0.06299
6 1 0.193618 0.0605467 0.000470822
1064 1 0.1284 0.698947 0.0597927
1091 1 0.0568844 0.756779 0.0585327
1096 1 0.115828 0.806048 0.0592113
1186 1 0.0627154 0.682191 0.124165
1218 1 0.0593727 0.812914 0.12507
1221 1 0.11024 0.748004 0.124696
1122 1 0.0602491 0.934098 -0.0022776
324 1 0.000978361 0.31218 0.313079
1542 1 0.18614 0.557972 0.499156
1068 1 0.240089 0.682753 0.0589561
1095 1 0.183876 0.758784 0.0653921
1099 1 0.312829 0.748815 0.0615798
1100 1 0.253558 0.81033 0.0578296
1190 1 0.184582 0.695813 0.12044
1194 1 0.31561 0.692831 0.133151
1222 1 0.188256 0.803535 0.131422
1225 1 0.247864 0.752113 0.129388
1226 1 0.311365 0.810136 0.126462
1072 1 0.37721 0.697796 0.0625984
1103 1 0.44676 0.747004 0.061531
1104 1 0.383133 0.813369 0.0624226
1198 1 0.440946 0.689152 0.125403
1229 1 0.368963 0.757683 0.125765
1230 1 0.442762 0.804805 0.118703
1377 1 -0.00177314 0.873845 0.248954
164 1 0.00788548 0.18704 0.192822
1123 1 0.0596043 0.8773 0.0593343
1128 1 0.127809 0.936211 0.0582613
1250 1 0.0627318 0.939063 0.127873
1253 1 0.124922 0.868711 0.122509
1129 1 0.248776 0.868679 0.00314125
1130 1 0.302477 0.929354 0.00408463
1634 1 0.0654849 0.931689 0.50017
1070 1 0.452253 0.686062 0.00224698
137 1 0.249367 0.995542 0.128421
11 1 0.307689 0.994469 0.0623884
1127 1 0.185281 0.856566 0.0716325
1131 1 0.310694 0.874352 0.0674903
1132 1 0.241675 0.930183 0.0687533
1254 1 0.18657 0.939355 0.140843
1257 1 0.245223 0.886175 0.13609
1258 1 0.316532 0.937468 0.126062
7 1 0.177269 0.9992 0.0672
586 1 0.310712 0.318469 0.500931
1553 1 0.495832 0.495574 0.496547
1135 1 0.441545 0.885524 0.0666351
1136 1 0.373041 0.936344 0.0604851
1261 1 0.377842 0.872496 0.125044
1262 1 0.434078 0.946021 0.124703
1185 1 0.0017602 0.627138 0.112849
136 1 0.121751 0.0562408 0.191549
163 1 0.0614944 0.121292 0.190071
258 1 0.064937 0.0581001 0.254968
264 1 0.122228 0.0603273 0.318807
291 1 0.0532977 0.121715 0.316977
293 1 0.117611 0.117219 0.256496
261 1 0.128534 0.998489 0.252125
139 1 0.317556 0.995792 0.188194
140 1 0.254961 0.0523218 0.185528
167 1 0.186808 0.125108 0.19678
171 1 0.311977 0.131227 0.197636
262 1 0.186168 0.0697726 0.251697
266 1 0.318512 0.063158 0.257481
268 1 0.251521 0.0584419 0.319894
295 1 0.186633 0.124789 0.315503
297 1 0.253269 0.11602 0.252853
299 1 0.311035 0.125172 0.319872
267 1 0.315362 0.00453774 0.322065
265 1 0.256798 0.00561573 0.259357
144 1 0.369775 0.0678744 0.191577
175 1 0.439925 0.131723 0.189697
270 1 0.433988 0.0714637 0.246394
272 1 0.369031 0.0613734 0.318422
301 1 0.374826 0.126225 0.264325
303 1 0.439594 0.135301 0.321385
276 1 0.492552 0.0661693 0.305057
46 1 0.436308 0.187126 0.012633
273 1 0.495935 1.00043 0.25072
168 1 0.121486 0.185393 0.18972
195 1 0.0595153 0.254885 0.184446
200 1 0.121982 0.311428 0.188403
290 1 0.0669253 0.19048 0.255712
296 1 0.122196 0.181085 0.313729
322 1 0.0624558 0.311905 0.24819
323 1 0.0583294 0.246891 0.311951
325 1 0.124726 0.253254 0.248837
328 1 0.117957 0.308657 0.317762
172 1 0.256294 0.19269 0.18918
199 1 0.18244 0.248419 0.191483
203 1 0.316421 0.250163 0.189595
204 1 0.245142 0.316827 0.190887
294 1 0.188161 0.185123 0.252789
298 1 0.31407 0.190325 0.253404
300 1 0.245124 0.187418 0.311529
326 1 0.181853 0.316846 0.251668
327 1 0.186849 0.250834 0.306185
329 1 0.242228 0.247093 0.245743
330 1 0.302705 0.310879 0.247868
331 1 0.299613 0.248171 0.312951
332 1 0.246723 0.314994 0.304887
176 1 0.37331 0.190886 0.191773
207 1 0.440945 0.253744 0.184251
208 1 0.375167 0.314646 0.183487
302 1 0.43725 0.191855 0.252516
304 1 0.376565 0.194188 0.318439
333 1 0.376385 0.256958 0.247537
334 1 0.438966 0.315377 0.247511
335 1 0.439068 0.253467 0.315848
336 1 0.376128 0.309705 0.311291
1510 1 0.183177 0.942615 0.372092
227 1 0.0550952 0.364663 0.189725
232 1 0.115429 0.434788 0.187888
354 1 0.0623164 0.428504 0.253391
355 1 0.0578367 0.375884 0.319124
357 1 0.125629 0.375835 0.252521
360 1 0.120437 0.430766 0.315902
1285 1 0.12908 0.497692 0.250474
1283 1 0.0581211 0.494219 0.30848
1515 1 0.312435 0.873624 0.437999
1287 1 0.183195 0.490334 0.305911
231 1 0.185019 0.38124 0.189628
235 1 0.309138 0.377159 0.192638
236 1 0.245192 0.440995 0.188607
358 1 0.181359 0.432866 0.25278
359 1 0.189525 0.376057 0.315277
361 1 0.245932 0.380996 0.251914
362 1 0.307711 0.440807 0.246856
363 1 0.311112 0.367624 0.302259
364 1 0.256423 0.427085 0.316448
1289 1 0.242323 0.495227 0.24887
239 1 0.440744 0.369249 0.187298
240 1 0.373278 0.435997 0.186289
365 1 0.378523 0.369329 0.249592
366 1 0.436213 0.435501 0.244044
367 1 0.440315 0.376135 0.312224
368 1 0.367597 0.435858 0.309025
1065 1 0.243312 0.624084 -1.90417e-05
610 1 0.0617822 0.433071 0.492755
1160 1 0.12368 0.5557 0.189803
1187 1 0.0552874 0.625174 0.190524
1282 1 0.0586902 0.557468 0.248031
1288 1 0.119151 0.560481 0.304054
1315 1 0.0667845 0.63566 0.306116
1317 1 0.125878 0.622536 0.244913
1155 1 0.0570289 0.494142 0.19144
1163 1 0.302803 0.504381 0.194521
1291 1 0.310345 0.494839 0.312951
1159 1 0.183957 0.494138 0.183996
1164 1 0.246811 0.564139 0.183834
1191 1 0.18554 0.620404 0.188666
1195 1 0.313597 0.623611 0.189085
1286 1 0.181137 0.560319 0.247685
1290 1 0.312749 0.562916 0.257968
1292 1 0.241424 0.557052 0.306946
1319 1 0.184272 0.621255 0.311063
1321 1 0.249033 0.621724 0.242654
1323 1 0.306114 0.626136 0.318211
1428 1 0.501614 0.566466 0.436811
1167 1 0.432644 0.495007 0.186922
1168 1 0.37019 0.562688 0.181007
1199 1 0.433306 0.627853 0.180565
1294 1 0.430318 0.562414 0.240584
1296 1 0.373964 0.555733 0.313619
1325 1 0.365309 0.619481 0.257195
1327 1 0.437269 0.625707 0.304254
1293 1 0.370872 0.500165 0.253963
1295 1 0.435971 0.500973 0.30753
132 1 -0.00198531 0.0686882 0.1846
1192 1 0.127921 0.690891 0.186996
1219 1 0.0601923 0.745095 0.18482
1224 1 0.122366 0.809431 0.184445
1314 1 0.0548868 0.688485 0.248706
1320 1 0.125793 0.685527 0.3066
1346 1 0.0566146 0.806383 0.241237
1347 1 0.0653959 0.746161 0.309319
1349 1 0.120765 0.755915 0.250628
1352 1 0.127772 0.81448 0.316563
1511 1 0.186742 0.872299 0.43541
1513 1 0.248981 0.871872 0.370855
1196 1 0.246753 0.694394 0.186637
1223 1 0.187916 0.752072 0.198206
1227 1 0.318982 0.754774 0.193965
1228 1 0.254863 0.810062 0.192887
1318 1 0.184797 0.683156 0.254603
1322 1 0.312537 0.688262 0.254464
1324 1 0.247201 0.691125 0.313542
1350 1 0.187049 0.817001 0.252557
1351 1 0.192054 0.751492 0.314033
1353 1 0.251007 0.752498 0.251508
1354 1 0.323816 0.818495 0.240274
1355 1 0.311308 0.754803 0.316231
1356 1 0.250777 0.815188 0.31043
1200 1 0.372915 0.679655 0.194125
1231 1 0.440702 0.745631 0.184007
1232 1 0.389365 0.811413 0.186673
1326 1 0.445726 0.687151 0.244815
1328 1 0.386337 0.70092 0.313403
1357 1 0.376438 0.759177 0.256521
1358 1 0.444029 0.810945 0.252344
1359 1 0.443633 0.754368 0.31098
1360 1 0.375904 0.820406 0.311756
1617 1 0.498411 0.7518 0.496678
1514 1 0.311424 0.929325 0.369378
1252 1 0.000447115 0.942148 0.187364
259 1 0.0608312 0.995676 0.320297
1251 1 0.0585569 0.872086 0.187545
1256 1 0.11454 0.937388 0.18959
1378 1 0.0637809 0.937709 0.255673
1379 1 0.0606781 0.870872 0.31552
1381 1 0.113389 0.867572 0.253261
1384 1 0.124924 0.939011 0.304377
131 1 0.052896 0.00642105 0.190592
1284 1 0.00490827 0.556127 0.317283
263 1 0.187771 0.992939 0.306527
135 1 0.188786 0.00264093 0.200688
1255 1 0.181276 0.868011 0.190743
1259 1 0.314173 0.871732 0.182527
1260 1 0.250476 0.950839 0.199195
1382 1 0.182528 0.925053 0.240822
1383 1 0.175793 0.8761 0.314739
1385 1 0.2471 0.872821 0.244705
1386 1 0.314424 0.931986 0.244239
1387 1 0.315784 0.869994 0.301753
1388 1 0.252261 0.933621 0.300904
143 1 0.423171 0.00373925 0.198018
271 1 0.430727 0.00312778 0.303801
1263 1 0.443103 0.874301 0.183329
1264 1 0.380781 0.93062 0.181519
1389 1 0.382923 0.874088 0.243496
1390 1 0.435403 0.940486 0.248066
1391 1 0.438719 0.878378 0.306702
1392 1 0.376086 0.935363 0.311429
269 1 0.36745 0.994712 0.254967
386 1 0.0576023 0.0609346 0.374566
392 1 0.122666 0.0653702 0.443146
419 1 0.0594213 0.119725 0.443638
421 1 0.117568 0.125878 0.372213
1524 1 0.494468 0.93418 0.436814
558 1 0.434113 0.192327 0.492927
389 1 0.122876 0.996023 0.375035
387 1 0.06108 0.997252 0.435671
101 1 0.123726 0.374535 -0.000754328
1268 1 0.49566 0.941389 0.185295
393 1 0.245299 0.00200825 0.378844
390 1 0.187557 0.0646447 0.379495
394 1 0.312864 0.0631309 0.383544
396 1 0.257571 0.0640053 0.442291
423 1 0.173736 0.135372 0.443078
425 1 0.248714 0.127633 0.37994
427 1 0.317796 0.130245 0.443566
391 1 0.187203 0.00545213 0.436906
1642 1 0.31227 0.937761 0.492864
613 1 0.126716 0.375002 0.498158
1516 1 0.250089 0.932145 0.435518
398 1 0.437746 0.0564424 0.373139
400 1 0.382801 0.0629863 0.435846
429 1 0.381006 0.124945 0.376057
431 1 0.441308 0.120418 0.438564
1332 1 0.492249 0.687943 0.308758
1541 1 0.122581 0.494023 0.498834
418 1 0.0597534 0.182245 0.375428
424 1 0.112864 0.192746 0.438942
450 1 0.0599641 0.311922 0.375529
451 1 0.0689029 0.255789 0.437007
453 1 0.125829 0.244035 0.367447
456 1 0.122452 0.31811 0.42746
4 1 0.000488162 0.0574654 0.0642114
161 1 0.00273703 0.130191 0.120629
1606 1 0.191511 0.815706 0.495888
422 1 0.180404 0.184515 0.377158
426 1 0.304086 0.188131 0.372248
428 1 0.24317 0.193359 0.436099
454 1 0.181966 0.309867 0.370806
455 1 0.176691 0.248114 0.432926
457 1 0.244633 0.246908 0.37067
458 1 0.307583 0.310143 0.365408
459 1 0.312182 0.248512 0.438926
460 1 0.248969 0.310991 0.435353
1508 1 0.00374302 0.934232 0.436172
1169 1 0.495712 0.499928 0.126913
49 1 0.497555 0.132778 0.00624142
395 1 0.322222 0.000358097 0.438281
1361 1 0.499014 0.748944 0.249997
1044 1 0.492277 0.556064 0.059834
430 1 0.444773 0.192342 0.378222
432 1 0.377178 0.18742 0.438446
461 1 0.377941 0.252309 0.378405
462 1 0.446756 0.315737 0.374707
463 1 0.439072 0.26039 0.44305
464 1 0.371708 0.319879 0.431667
1570 1 0.0567564 0.679202 0.502204
1396 1 0.499125 0.934671 0.311865
1573 1 0.121989 0.621842 0.493958
1509 1 0.125654 0.877959 0.37613
483 1 0.0664608 0.380924 0.434603
488 1 0.124701 0.434777 0.437018
485 1 0.127545 0.375577 0.374697
482 1 0.0493675 0.437516 0.376229
1413 1 0.121734 0.492461 0.375677
1411 1 0.0649851 0.494912 0.43247
1201 1 0.497372 0.616287 0.122607
625 1 0.496586 0.375369 0.499124
1417 1 0.249862 0.49747 0.367471
1415 1 0.183514 0.485588 0.437906
492 1 0.252687 0.441974 0.430185
491 1 0.30843 0.376621 0.4421
486 1 0.182366 0.437767 0.366812
489 1 0.242541 0.370184 0.379759
490 1 0.313311 0.436846 0.378488
487 1 0.189332 0.370783 0.442451
2 1 0.0542735 0.0658839 -0.000155184
129 1 0.00600734 0.00123546 0.124306
1507 1 0.0641452 0.867003 0.435044
495 1 0.441513 0.379173 0.427413
496 1 0.36742 0.437171 0.443355
493 1 0.374641 0.382878 0.373214
494 1 0.433673 0.436594 0.367697
1609 1 0.248461 0.759446 0.500556
1124 1 0.000331305 0.940167 0.0672741
1456 1 0.38315 0.685702 0.435892
340 1 0.498874 0.314523 0.31013
1410 1 0.0658854 0.559294 0.370955
1416 1 0.119619 0.564391 0.428709
1443 1 0.0620913 0.631176 0.43298
1445 1 0.122609 0.623221 0.37387
84 1 0.492058 0.315184 0.0588292
1069 1 0.366918 0.628907 0.00704809
1419 1 0.307005 0.503527 0.431937
1449 1 0.253734 0.627141 0.378376
1451 1 0.315692 0.628689 0.437211
1420 1 0.245872 0.557917 0.430298
1447 1 0.183707 0.621689 0.441281
1414 1 0.180321 0.558631 0.364678
1418 1 0.30635 0.558493 0.371723
1613 1 0.379831 0.745409 0.497706
81 1 0.497141 0.248909 0.00243865
77 1 0.376602 0.25531 0.00476921
1421 1 0.375879 0.49704 0.3766
1423 1 0.429451 0.49903 0.445518
1422 1 0.432282 0.561135 0.379
1455 1 0.437138 0.619746 0.440019
1424 1 0.369706 0.564669 0.432747
1453 1 0.37678 0.628075 0.376028
1512 1 0.132889 0.931517 0.438687
148 1 0.50219 0.0694575 0.192913
1545 1 0.251679 0.500712 0.496265
1487 1 0.435223 0.750202 0.433603
1448 1 0.132944 0.685493 0.439709
1474 1 0.0628031 0.809679 0.37407
1480 1 0.125996 0.799455 0.435884
1475 1 0.0636878 0.738299 0.441805
1442 1 0.0663725 0.688521 0.368719
1477 1 0.124662 0.74371 0.375654
525 1 0.380351 0.991393 0.497936
241 1 0.496368 0.371082 0.124036
1454 1 0.437933 0.688471 0.374555
1485 1 0.373054 0.749508 0.378959
1446 1 0.185815 0.681205 0.374201
1450 1 0.312188 0.683472 0.373473
1479 1 0.191169 0.746955 0.431775
1452 1 0.247804 0.689843 0.438324
1483 1 0.306318 0.746349 0.440484
1488 1 0.378313 0.823892 0.436313
1481 1 0.251464 0.754776 0.379042
1478 1 0.183118 0.813734 0.377355
1484 1 0.251319 0.821039 0.431932
1482 1 0.313435 0.817674 0.373421
1486 1 0.438121 0.81326 0.378183
1506 1 0.0624125 0.927338 0.379048
1441 1 0.00389512 0.623802 0.373425
1412 1 0.0131757 0.564446 0.430045
145 1 0.492987 0.0112282 0.129952
321 1 0.000565523 0.252079 0.244692
1 1 0.00865976 0.995812 0.00468351
1425 1 0.491648 0.501671 0.37406
1153 1 0.000905819 0.501442 0.130671
1569 1 -0.00128455 0.621441 0.500485
1140 1 0.496599 0.948499 0.0689179
1614 1 0.437755 0.81584 0.498494
582 1 0.184416 0.314197 0.499812
585 1 0.24543 0.250108 0.497465
45 1 0.374859 0.132328 0.00582776
17 1 0.497128 0.00132085 0.00505437
517 1 0.129553 0.99141 0.497427
1610 1 0.311808 0.8109 0.495786
1605 1 0.120384 0.746076 0.497389
1637 1 0.127739 0.870203 0.493341
109 1 0.371719 0.367725 0.00662379
1061 1 0.132574 0.626222 0.00242393
1030 1 0.186734 0.556661 0.0019528
1633 1 0.000801536 0.875898 0.498031
522 1 0.324768 0.0589172 0.49427
1041 1 0.497048 0.495732 0.00496832
1026 1 0.0666779 0.55676 0.00223266
1102 1 0.443861 0.820071 0.00473293
557 1 0.378041 0.128788 0.496564
105 1 0.246053 0.366605 0.00314357
1125 1 0.119383 0.872352 0.00249215
102 1 0.185112 0.440805 0.00281566
589 1 0.374081 0.257641 0.495868
41 1 0.24821 0.122643 0.00315032
37 1 0.126494 0.116291 0.00230434
1126 1 0.193605 0.921573 0.00167859
1581 1 0.373014 0.622238 0.494282
1058 1 0.0623897 0.691033 0.0052758
578 1 0.0619631 0.311598 0.497751
110 1 0.436149 0.435592 0.0063796
520 1 0.124923 0.0556291 0.554462
547 1 0.0604994 0.12359 0.557763
642 1 0.0637322 0.0574714 0.628437
677 1 0.121755 0.123025 0.617419
515 1 0.0623535 0.995192 0.558417
1025 1 0.00469795 0.498495 0.994574
612 1 0.010753 0.43394 0.555281
524 1 0.247746 0.0611719 0.567891
551 1 0.184164 0.129713 0.558856
555 1 0.317417 0.12056 0.571044
646 1 0.178669 0.0585815 0.626539
650 1 0.312033 0.0564887 0.623534
681 1 0.239784 0.114625 0.626827
788 1 0.499506 0.0718837 0.817912
836 1 0.00450208 0.311116 0.816721
528 1 0.371387 0.0587991 0.566119
559 1 0.428294 0.132567 0.570178
654 1 0.439431 0.0626788 0.622386
685 1 0.37192 0.127083 0.632601
1796 1 0.00243525 0.563668 0.813513
97 1 0.0045896 0.371383 0.992587
1905 1 0.501123 0.868693 0.74242
708 1 0.00838476 0.315552 0.681261
552 1 0.125756 0.184503 0.560086
579 1 0.0662357 0.244243 0.564509
584 1 0.121382 0.312841 0.557252
674 1 0.0620141 0.177977 0.614357
706 1 0.0681323 0.310433 0.619145
709 1 0.140406 0.248225 0.624808
556 1 0.261466 0.189955 0.562979
583 1 0.187126 0.261111 0.563531
587 1 0.312555 0.255041 0.55916
588 1 0.249996 0.314025 0.556857
678 1 0.196887 0.191328 0.620684
682 1 0.314388 0.189736 0.627777
710 1 0.194705 0.312117 0.631366
713 1 0.25683 0.254175 0.620839
714 1 0.305548 0.314063 0.625123
724 1 0.495974 0.322321 0.674225
1572 1 0.000895679 0.684597 0.561054
560 1 0.372868 0.188152 0.564441
591 1 0.430643 0.255017 0.559177
592 1 0.368447 0.316962 0.568197
686 1 0.429759 0.188756 0.627803
717 1 0.372551 0.25233 0.623663
718 1 0.437295 0.317373 0.617352
1009 1 0.496294 0.374879 0.881047
550 1 0.188155 0.193892 0.502455
1538 1 0.0607017 0.563915 0.500091
868 1 0.00565354 0.436079 0.81449
611 1 0.063534 0.374628 0.56201
616 1 0.12004 0.436953 0.558338
738 1 0.0600086 0.43033 0.633245
741 1 0.118688 0.374596 0.624547
1669 1 0.113159 0.494634 0.630159
1539 1 0.0634962 0.499918 0.561149
1921 1 0.0046337 0.498147 0.878281
676 1 0.00159967 0.183543 0.681211
1578 1 0.305696 0.690452 0.503122
546 1 0.0597136 0.18 0.500204
996 1 0.00369753 0.436778 0.932029
615 1 0.187123 0.369846 0.566851
619 1 0.312393 0.379194 0.561566
620 1 0.24622 0.433817 0.560487
742 1 0.178837 0.434541 0.62467
745 1 0.250381 0.384363 0.623945
746 1 0.316182 0.437941 0.622084
1543 1 0.185476 0.49757 0.565457
1673 1 0.244296 0.505328 0.623166
1547 1 0.302959 0.496765 0.560659
1645 1 0.377799 0.880364 0.505803
623 1 0.44327 0.386276 0.568742
624 1 0.371226 0.435795 0.554118
749 1 0.372895 0.369959 0.628842
750 1 0.435864 0.44786 0.624457
1544 1 0.121498 0.559187 0.564655
1571 1 0.0555704 0.627255 0.567327
1666 1 0.0569678 0.560015 0.628323
1701 1 0.124771 0.623469 0.62751
772 1 0.00463161 0.0607173 0.809457
721 1 0.49771 0.243133 0.620551
900 1 0.00721966 0.0626131 0.930153
1548 1 0.240717 0.560404 0.559699
1575 1 0.178794 0.6195 0.561515
1579 1 0.308565 0.622254 0.563351
1670 1 0.186072 0.566805 0.620655
1674 1 0.306547 0.56086 0.619277
1705 1 0.249667 0.624446 0.625393
553 1 0.259707 0.122933 0.512839
549 1 0.120082 0.123357 0.503028
737 1 0.000978936 0.380909 0.618379
514 1 0.0656852 0.0520956 0.502349
1677 1 0.372813 0.494982 0.620175
1552 1 0.368958 0.560934 0.56449
1583 1 0.43675 0.618666 0.565969
1678 1 0.437727 0.555952 0.6276
1709 1 0.372471 0.620446 0.629888
1551 1 0.435536 0.498853 0.564738
1908 1 0.495978 0.935337 0.819276
1764 1 6.6931e-05 0.939612 0.686437
1576 1 0.122974 0.685621 0.560806
1603 1 0.0579212 0.742935 0.567844
1608 1 0.12507 0.812454 0.56416
1698 1 0.0612873 0.690436 0.632028
1730 1 0.0618984 0.812341 0.625845
1733 1 0.125521 0.750705 0.625781
1574 1 0.189512 0.692786 0.50063
1825 1 0.00811385 0.629902 0.753802
961 1 0.00318251 0.246248 0.87482
1580 1 0.250094 0.684343 0.56448
1607 1 0.1916 0.757647 0.55882
1611 1 0.310407 0.750806 0.560008
1612 1 0.247829 0.813418 0.563137
1702 1 0.184033 0.6848 0.624215
1706 1 0.319238 0.688645 0.628038
1734 1 0.192087 0.809237 0.630585
1737 1 0.246712 0.753833 0.621226
1738 1 0.311211 0.80392 0.631476
617 1 0.245294 0.375661 0.504116
1584 1 0.371437 0.680195 0.553421
1615 1 0.439301 0.761097 0.557889
1616 1 0.370153 0.817635 0.564086
1710 1 0.431948 0.689192 0.616306
1741 1 0.376905 0.75202 0.624635
1742 1 0.438886 0.81199 0.620665
1857 1 0.00282567 0.753079 0.7539
645 1 0.119408 0.997317 0.623684
641 1 0.00166836 0.999928 0.624647
1635 1 0.0550548 0.875732 0.562756
1640 1 0.128935 0.941279 0.568341
1762 1 0.0618252 0.933879 0.615468
1765 1 0.124163 0.873649 0.621027
801 1 0.00354311 0.123787 0.750813
1646 1 0.437214 0.934803 0.506211
649 1 0.242489 0.999093 0.627428
1639 1 0.192768 0.869984 0.566475
1643 1 0.311807 0.873912 0.559471
1644 1 0.256832 0.935848 0.563105
1766 1 0.1825 0.935753 0.632933
1769 1 0.254507 0.870692 0.628596
1770 1 0.303111 0.937436 0.631694
523 1 0.313027 -0.0019627 0.561195
519 1 0.191205 0.000725099 0.567202
980 1 0.493824 0.303172 0.930255
1537 1 0.00950806 0.495517 0.50263
653 1 0.378709 0.00594827 0.625278
1647 1 0.434831 0.878177 0.563832
1648 1 0.366497 0.931729 0.571687
1773 1 0.37547 0.875979 0.628512
1774 1 0.44011 0.942897 0.626221
527 1 0.440169 0.999582 0.560544
865 1 0.0047214 0.377467 0.758197
1134 1 0.447396 0.931496 0.996293
1550 1 0.433345 0.561431 0.5024
34 1 0.0675455 0.190976 1.00125
648 1 0.116547 0.0598784 0.683535
675 1 0.0568727 0.11985 0.690038
770 1 0.0645233 0.0588351 0.752494
776 1 0.125737 0.0608684 0.811774
803 1 0.071654 0.117898 0.813605
805 1 0.129137 0.129491 0.743254
771 1 0.0700643 0.0027744 0.812916
773 1 0.12465 1.00031 0.751681
779 1 0.313743 1.00123 0.815745
651 1 0.314962 0.00249789 0.687027
652 1 0.243082 0.0573783 0.687914
679 1 0.191138 0.125959 0.69081
683 1 0.317307 0.126484 0.684458
774 1 0.187008 0.0659859 0.751048
778 1 0.306068 0.0618246 0.746868
780 1 0.250068 0.0565402 0.815691
807 1 0.182878 0.130117 0.81571
809 1 0.251606 0.121516 0.747043
811 1 0.319848 0.123701 0.811782
775 1 0.188847 0.993296 0.814711
783 1 0.429906 0.000709371 0.81743
656 1 0.380338 0.0619117 0.684897
687 1 0.435917 0.126621 0.685323
782 1 0.435224 0.0706723 0.755222
784 1 0.373023 0.0646255 0.81694
813 1 0.370784 0.119425 0.749504
815 1 0.433092 0.122738 0.808614
781 1 0.383889 0.00317311 0.746333
655 1 0.447778 0.00810396 0.688007
581 1 0.125785 0.246435 0.500641
804 1 0.0076123 0.188736 0.817547
911 1 0.441374 0.00246793 0.937401
2030 1 0.437202 0.943014 0.875599
707 1 0.0641306 0.238988 0.679269
680 1 0.123622 0.180398 0.683686
712 1 0.12909 0.312038 0.680206
802 1 0.0629758 0.181651 0.745897
808 1 0.124094 0.189873 0.814975
834 1 0.0610345 0.30899 0.750232
835 1 0.063202 0.249145 0.815094
837 1 0.119211 0.243629 0.740098
840 1 0.126749 0.308894 0.808725
564 1 0.499446 0.186011 0.551921
2032 1 0.379015 0.941006 0.938169
684 1 0.254853 0.18406 0.690585
711 1 0.197638 0.247374 0.684059
715 1 0.314591 0.258188 0.687201
716 1 0.25237 0.312638 0.691024
806 1 0.187792 0.188173 0.748032
810 1 0.321504 0.186685 0.749211
812 1 0.247732 0.176877 0.806564
838 1 0.184097 0.310716 0.749773
839 1 0.188375 0.245412 0.805599
841 1 0.254271 0.239964 0.752227
842 1 0.307519 0.31069 0.750891
843 1 0.309901 0.240243 0.810733
844 1 0.247644 0.307628 0.804847
688 1 0.374157 0.190047 0.692102
719 1 0.433756 0.259498 0.687906
720 1 0.372325 0.315448 0.68716
814 1 0.436329 0.19087 0.755477
816 1 0.371407 0.185886 0.818964
845 1 0.376591 0.249605 0.748061
846 1 0.437673 0.311583 0.749885
847 1 0.442259 0.251933 0.816121
848 1 0.3714 0.304369 0.812639
590 1 0.438631 0.319616 0.510579
618 1 0.311086 0.439224 0.502007
78 1 0.437202 0.311666 1.00058
2029 1 0.379685 0.884449 0.883557
739 1 0.0640574 0.374845 0.69516
744 1 0.128467 0.432131 0.687804
866 1 0.0649707 0.432278 0.755411
867 1 0.0678166 0.375462 0.821345
869 1 0.127227 0.372419 0.748502
872 1 0.133104 0.433184 0.799569
1797 1 0.133584 0.500497 0.749149
1667 1 0.0568912 0.500691 0.690266
98 1 0.065206 0.434275 0.996587
964 1 0.00378105 0.302603 0.937252
1675 1 0.314118 0.49946 0.685983
743 1 0.19346 0.371395 0.684723
747 1 0.309095 0.375768 0.689216
748 1 0.24564 0.44284 0.686681
870 1 0.194989 0.439275 0.749343
871 1 0.188602 0.375193 0.816463
873 1 0.245679 0.375565 0.748217
874 1 0.305884 0.427694 0.747631
875 1 0.311753 0.368978 0.818735
876 1 0.251125 0.427429 0.813937
1801 1 0.253797 0.500244 0.746659
1799 1 0.193897 0.499019 0.812666
1671 1 0.180967 0.504845 0.687068
1679 1 0.435693 0.500465 0.691401
751 1 0.431781 0.383303 0.684382
752 1 0.37105 0.440607 0.684911
877 1 0.380704 0.373856 0.754986
878 1 0.440671 0.437499 0.751886
879 1 0.438944 0.37113 0.811147
880 1 0.372176 0.4324 0.811241
1805 1 0.36823 0.49913 0.751888
2031 1 0.447847 0.871554 0.933546
1038 1 0.434098 0.562651 0.992221
993 1 0.00828726 0.366921 0.878414
1795 1 0.0691032 0.504531 0.813812
1672 1 0.124217 0.564 0.683708
1699 1 0.0603867 0.631087 0.692862
1794 1 0.0620646 0.568791 0.755161
1800 1 0.133602 0.561014 0.814513
1827 1 0.0664889 0.627562 0.811994
1829 1 0.123181 0.629518 0.749462
2022 1 0.189831 0.932854 0.875866
1803 1 0.312872 0.48881 0.808454
1676 1 0.252927 0.558617 0.690042
1703 1 0.186135 0.628887 0.690239
1707 1 0.315171 0.620854 0.688865
1798 1 0.185536 0.566677 0.741179
1802 1 0.316038 0.572066 0.753603
1804 1 0.24885 0.564104 0.807577
1831 1 0.188954 0.625209 0.808746
1833 1 0.252799 0.629424 0.747946
1835 1 0.312511 0.627236 0.816508
903 1 0.184344 0.991417 0.93458
1745 1 0.500926 0.750956 0.626686
1680 1 0.37395 0.556694 0.679858
1711 1 0.440257 0.622482 0.686276
1806 1 0.442031 0.562472 0.755932
1808 1 0.381153 0.566413 0.813881
1837 1 0.377088 0.623501 0.74375
1839 1 0.440929 0.629738 0.815406
1807 1 0.43775 0.494585 0.815903
692 1 0.494208 0.188255 0.693631
532 1 0.49922 0.0610442 0.55908
881 1 0.497793 0.367158 0.743359
1704 1 0.12216 0.6869 0.685803
1731 1 0.0570404 0.750196 0.686905
1736 1 0.117821 0.809091 0.687231
1826 1 0.0611927 0.692667 0.751992
1832 1 0.123932 0.692888 0.821267
1858 1 0.0607171 0.809852 0.756956
1859 1 0.0581403 0.743631 0.818032
1861 1 0.12014 0.746408 0.748469
1864 1 0.124062 0.811979 0.814871
2025 1 0.253392 0.878875 0.871554
1708 1 0.262075 0.697042 0.683685
1735 1 0.187777 0.743714 0.683248
1739 1 0.31879 0.750001 0.70369
1740 1 0.256229 0.807109 0.692021
1830 1 0.19295 0.694617 0.748977
1834 1 0.312728 0.683388 0.755273
1836 1 0.24233 0.685818 0.8178
1862 1 0.189305 0.816272 0.752604
1863 1 0.182327 0.750009 0.810914
1865 1 0.251483 0.750287 0.752238
1866 1 0.32285 0.813658 0.752798
1867 1 0.311549 0.742612 0.809017
1868 1 0.257581 0.815315 0.811184
833 1 0.00892262 0.243458 0.7421
2027 1 0.30894 0.878655 0.940086
2023 1 0.189442 0.869709 0.933381
2028 1 0.246204 0.934215 0.939124
1712 1 0.374155 0.685275 0.688071
1743 1 0.438579 0.746316 0.693177
1744 1 0.385444 0.804752 0.689834
1838 1 0.436777 0.688612 0.75661
1840 1 0.37977 0.683278 0.820061
1869 1 0.379613 0.74762 0.755116
1870 1 0.43286 0.817583 0.752113
1871 1 0.440007 0.754202 0.807917
1872 1 0.379633 0.823995 0.816817
1940 1 0.496594 0.556306 0.937749
643 1 0.0565139 0.00225613 0.686257
1763 1 0.0588473 0.871526 0.687968
1768 1 0.113957 0.933395 0.687317
1890 1 0.0598387 0.932314 0.749068
1891 1 0.0609171 0.87529 0.822461
1893 1 0.123003 0.873248 0.743391
1896 1 0.129659 0.931651 0.810177
2026 1 0.311238 0.934234 0.879467
74 1 0.308314 0.304635 0.992235
647 1 0.181802 0.000674345 0.690939
1767 1 0.191159 0.875309 0.689749
1771 1 0.312238 0.863657 0.687832
1772 1 0.252416 0.935642 0.695503
1894 1 0.1871 0.939005 0.745126
1895 1 0.190711 0.870736 0.813751
1897 1 0.256546 0.875562 0.751008
1898 1 0.31042 0.944871 0.755496
1899 1 0.317958 0.879314 0.813054
1900 1 0.246814 0.936145 0.810343
777 1 0.242614 0.00224255 0.747314
1775 1 0.440827 0.874362 0.68646
1776 1 0.370061 0.942519 0.688233
1901 1 0.374552 0.882058 0.752214
1902 1 0.441886 0.937939 0.739212
1903 1 0.43976 0.878871 0.817677
1904 1 0.375768 0.933629 0.815701
516 1 0.00320962 0.063201 0.559979
1588 1 0.500262 0.692164 0.569933
5 1 0.13009 0.99786 0.999911
901 1 0.125189 0.994827 0.873792
898 1 0.0672325 0.0543554 0.872875
904 1 0.126996 0.0555146 0.940502
931 1 0.0566347 0.125311 0.945114
933 1 0.123492 0.121388 0.874
1093 1 0.119743 0.749582 0.992779
905 1 0.251027 0.99351 0.869856
907 1 0.31584 0.996273 0.942216
902 1 0.189349 0.063715 0.87876
906 1 0.309261 0.0534238 0.87543
908 1 0.248229 0.048832 0.934397
935 1 0.180695 0.117459 0.942499
937 1 0.247877 0.118378 0.875666
939 1 0.312406 0.116856 0.938733
2018 1 0.0631743 0.944125 0.864334
929 1 0.00418941 0.125731 0.870657
1638 1 0.192963 0.938146 0.502185
10 1 0.318405 0.0595571 0.997157
909 1 0.371898 0.00089451 0.883736
910 1 0.436305 0.0644349 0.880276
912 1 0.383037 0.0611406 0.943255
941 1 0.371964 0.113102 0.879893
943 1 0.442177 0.132466 0.939216
1892 1 0.00299163 0.93346 0.815288
1997 1 0.373818 0.750485 0.865998
1828 1 0.00207346 0.686141 0.81678
2019 1 0.0583476 0.879385 0.941495
930 1 0.0669812 0.184538 0.872989
936 1 0.126926 0.176366 0.938311
962 1 0.0653878 0.306603 0.879292
963 1 0.0575519 0.242209 0.937223
965 1 0.123149 0.245508 0.880006
968 1 0.128388 0.310608 0.938263
1968 1 0.381883 0.692195 0.949469
934 1 0.188277 0.180035 0.885317
938 1 0.297794 0.185455 0.876444
940 1 0.244703 0.183863 0.943054
966 1 0.194461 0.309478 0.870023
967 1 0.183754 0.253593 0.93014
969 1 0.246167 0.241546 0.86537
970 1 0.304164 0.306054 0.876946
971 1 0.309931 0.240662 0.935127
972 1 0.249594 0.306122 0.934473
1966 1 0.444902 0.691935 0.873199
1540 1 0.00323066 0.554071 0.565957
973 1 0.373015 0.253324 0.873415
944 1 0.372624 0.185809 0.925027
942 1 0.437274 0.185398 0.86953
976 1 0.376103 0.310908 0.931693
975 1 0.429569 0.244652 0.937537
974 1 0.439273 0.312954 0.874881
1998 1 0.444848 0.811929 0.870726
705 1 0.00870281 0.245961 0.623818
899 1 0.0663287 0.986788 0.934698
884 1 0.499006 0.431028 0.810354
1602 1 0.0616781 0.809763 0.502278
1923 1 0.0638624 0.495706 0.932282
1000 1 0.132043 0.440013 0.931247
997 1 0.130011 0.379822 0.876214
994 1 0.0723289 0.441156 0.867021
995 1 0.0645517 0.369805 0.93911
1925 1 0.131995 0.501484 0.876209
820 1 0.49381 0.188819 0.810436
1927 1 0.191929 0.500085 0.938263
999 1 0.186432 0.374011 0.937754
1001 1 0.249555 0.374864 0.876543
1004 1 0.25257 0.430408 0.9389
1002 1 0.314061 0.42734 0.877613
998 1 0.191134 0.432669 0.876691
1003 1 0.314038 0.366949 0.9347
1841 1 0.494792 0.619205 0.743736
849 1 0.494712 0.253452 0.749065
1090 1 0.0602918 0.811138 0.994756
2024 1 0.123515 0.926099 0.937113
1935 1 0.43546 0.490243 0.944624
1008 1 0.371993 0.434023 0.936911
1006 1 0.44188 0.436038 0.870797
1005 1 0.37795 0.367155 0.872954
1007 1 0.441772 0.367777 0.944044
1999 1 0.439182 0.752018 0.931954
580 1 0.00567911 0.308421 0.567642
577 1 0.00438244 0.251756 0.500306
1860 1 0.000981196 0.808374 0.821249
1994 1 0.320238 0.817529 0.876205
106 1 0.311696 0.430368 1.00063
1922 1 0.0556723 0.563408 0.871766
1928 1 0.126639 0.564106 0.929915
1955 1 0.0610327 0.621709 0.933466
1957 1 0.125713 0.623407 0.86994
756 1 0.496635 0.441678 0.689027
1929 1 0.246224 0.503045 0.868108
1931 1 0.305895 0.497996 0.946627
1930 1 0.30539 0.560935 0.877268
1926 1 0.188316 0.562731 0.868105
1963 1 0.306504 0.620143 0.935507
1961 1 0.24551 0.625988 0.873111
1932 1 0.252502 0.561237 0.941662
1959 1 0.186845 0.623698 0.937215
1793 1 0.00603649 0.500631 0.752672
70 1 0.184421 0.309154 0.994969
1933 1 0.374209 0.489546 0.872417
1965 1 0.38074 0.630962 0.888939
1934 1 0.435296 0.556737 0.876158
1936 1 0.373424 0.552228 0.936859
1967 1 0.438973 0.631372 0.93651
1012 1 0.495833 0.435023 0.94724
1986 1 0.0596201 0.815035 0.880908
1987 1 0.0638493 0.752185 0.934872
1954 1 0.0612761 0.68173 0.880515
1992 1 0.121604 0.814064 0.941098
1989 1 0.123738 0.758159 0.877563
1960 1 0.116129 0.682081 0.940457
2021 1 0.122793 0.873977 0.874786
614 1 0.187507 0.435709 0.502568
2000 1 0.380814 0.81824 0.935294
2017 1 0.00050907 0.877513 0.883059
1057 1 0.00377007 0.626486 0.994926
852 1 0.496837 0.313591 0.810744
1995 1 0.31535 0.746847 0.93953
1996 1 0.250359 0.812572 0.936142
1991 1 0.193425 0.750707 0.936571
1958 1 0.185951 0.684047 0.874964
1964 1 0.248181 0.689901 0.936541
1993 1 0.263014 0.756456 0.870635
1962 1 0.313744 0.693583 0.878277
1990 1 0.190851 0.80822 0.870121
1101 1 0.384559 0.758294 0.997368
513 1 -0.00111045 0.992988 0.499574
1066 1 0.307024 0.685609 0.993024
1062 1 0.177084 0.691159 0.99251
1098 1 0.323614 0.815032 0.997971
1094 1 0.181971 0.811653 0.997905
1037 1 0.376163 0.493249 0.99865
1641 1 0.250735 0.873304 0.501111
69 1 0.130265 0.246058 0.994491
621 1 0.371709 0.370175 0.50187
521 1 0.251707 0.995653 0.504092
73 1 0.242055 0.246469 0.996925
1034 1 0.311827 0.559331 0.999834
1029 1 0.123917 0.49861 0.991562
526 1 0.439939 0.0631188 0.503977
561 1 0.495782 0.122957 0.504919
1133 1 0.387315 0.877543 0.995346
1549 1 0.366773 0.501339 0.504379
622 1 0.434394 0.443043 0.49994
554 1 0.314208 0.195529 0.504445
609 1 0.0111024 0.368201 0.501021
518 1 0.192639 0.0548292 0.502502
1582 1 0.439472 0.690157 0.503075
24 1 0.627622 0.0681381 0.0563435
51 1 0.556684 0.122808 0.0719578
146 1 0.562692 0.0621769 0.12826
181 1 0.619821 0.121244 0.130893
118 1 0.683063 0.434645 0.00136704
50 1 0.565869 0.188932 0.000369547
1444 1 0.999922 0.679112 0.434039
28 1 0.747135 0.0601459 0.0643591
55 1 0.685429 0.125421 0.0608751
59 1 0.814435 0.123674 0.0569612
150 1 0.69328 0.0638606 0.127271
154 1 0.807178 0.057109 0.124243
185 1 0.748654 0.124846 0.121325
27 1 0.806144 0.000962202 0.0660125
52 1 0.503253 0.184291 0.0717217
23 1 0.684138 0.00155861 0.0644732
153 1 0.742802 -0.000387054 0.123203
53 1 0.613956 0.12198 -0.000616362
32 1 0.873272 0.0645786 0.0595555
63 1 0.932729 0.127102 0.0582275
158 1 0.940095 0.0663564 0.129047
189 1 0.86569 0.121313 0.123415
157 1 0.875451 0.00233995 0.126775
1076 1 0.513593 0.677562 0.0666785
1492 1 0.502483 0.815347 0.431075
56 1 0.628488 0.194044 0.0648261
83 1 0.559217 0.253047 0.0624927
88 1 0.632629 0.311448 0.0638155
178 1 0.565348 0.193292 0.120259
210 1 0.559072 0.317427 0.125689
213 1 0.630818 0.256417 0.125622
1108 1 0.503021 0.81179 0.0657219
1505 1 0.99877 0.863936 0.374087
60 1 0.751716 0.184113 0.0626611
87 1 0.693754 0.244416 0.0526849
91 1 0.805937 0.244003 0.0655797
92 1 0.743446 0.322585 0.0721552
182 1 0.691 0.185323 0.125193
186 1 0.8059 0.184271 0.126934
214 1 0.681191 0.319503 0.128251
217 1 0.747773 0.253701 0.122106
218 1 0.814723 0.309506 0.122103
1662 1 0.938598 0.934402 0.494125
36 1 1.00051 0.19315 0.0626937
629 1 0.628281 0.377812 0.486847
64 1 0.872042 0.18656 0.0630743
95 1 0.937006 0.247127 0.0597483
96 1 0.879039 0.307676 0.0581863
190 1 0.933497 0.179476 0.127762
221 1 0.875028 0.242717 0.124201
222 1 0.938525 0.311672 0.116911
413 1 0.87658 0.997155 0.380084
122 1 0.803945 0.438684 0.00733658
545 1 0.998496 0.114513 0.499821
115 1 0.560401 0.382597 0.0670006
120 1 0.621705 0.444464 0.0608983
242 1 0.554523 0.440111 0.128154
245 1 0.623103 0.381694 0.131225
1043 1 0.562552 0.497915 0.0677658
1173 1 0.624313 0.499334 0.125097
193 1 0.996762 0.239512 0.124821
1533 1 0.878808 0.870947 0.378816
1249 1 0.993139 0.874425 0.118636
119 1 0.670992 0.379019 0.0707094
123 1 0.810473 0.364243 0.0595529
124 1 0.744981 0.428813 0.0655492
246 1 0.683146 0.439058 0.126618
249 1 0.73748 0.377042 0.135651
250 1 0.810952 0.435808 0.123628
1047 1 0.689235 0.497759 0.0679115
1177 1 0.747087 0.505134 0.126938
1051 1 0.80736 0.501653 0.0614774
353 1 0.998888 0.369917 0.246729
127 1 0.943308 0.375907 0.0548147
128 1 0.874516 0.448557 0.0554534
253 1 0.876129 0.376564 0.11286
254 1 0.943215 0.446076 0.120437
1534 1 0.938574 0.934341 0.376782
225 1 0.988405 0.374653 0.12985
372 1 0.504385 0.4323 0.310591
209 1 0.498402 0.248649 0.126666
1106 1 0.560992 0.813903 0.00410081
1048 1 0.624031 0.565101 0.0622968
1075 1 0.559313 0.610145 0.0586557
1170 1 0.562662 0.561916 0.128661
1205 1 0.621858 0.626538 0.121899
433 1 0.499957 0.126738 0.379454
1052 1 0.751161 0.568277 0.0626529
1079 1 0.682847 0.622383 0.0565735
1083 1 0.817879 0.623134 0.0626916
1174 1 0.686277 0.560271 0.124873
1178 1 0.816478 0.561092 0.126324
1209 1 0.749949 0.629513 0.118241
1028 1 0.991651 0.569455 0.0544827
1657 1 0.750042 0.873808 0.502361
82 1 0.559811 0.313526 0.000718582
1181 1 0.875383 0.500481 0.123989
1055 1 0.949661 0.504588 0.0634763
1056 1 0.876186 0.556704 0.060739
1087 1 0.938404 0.633458 0.0645937
1182 1 0.935315 0.55791 0.127215
1213 1 0.879789 0.62722 0.123789
1145 1 0.742114 0.880996 0.00332669
468 1 0.505221 0.318072 0.437839
1535 1 0.932683 0.865063 0.441539
1080 1 0.624308 0.686025 0.0557501
1107 1 0.567379 0.749328 0.0614127
1112 1 0.622178 0.817425 0.061743
1202 1 0.570531 0.688386 0.128178
1234 1 0.561717 0.816755 0.127123
1237 1 0.629551 0.755391 0.115015
481 1 0.995874 0.371001 0.376337
1536 1 0.872266 0.931405 0.442544
415 1 0.937717 0.996673 0.431473
100 1 0.999278 0.436513 0.0588353
212 1 0.500575 0.31694 0.195393
1084 1 0.746268 0.687713 0.0552974
1111 1 0.68869 0.752282 0.0605918
1115 1 0.810489 0.74919 0.060539
1116 1 0.752007 0.81278 0.060841
1206 1 0.680605 0.682017 0.115746
1210 1 0.813017 0.681128 0.122183
1238 1 0.693719 0.818025 0.122206
1241 1 0.745442 0.748444 0.130416
1242 1 0.812826 0.818507 0.117566
116 1 0.49877 0.437542 0.0605507
1088 1 0.869059 0.689588 0.0545749
1119 1 0.933309 0.749745 0.060944
1120 1 0.874456 0.818791 0.0605453
1214 1 0.939725 0.688928 0.126272
1245 1 0.872211 0.74697 0.122224
1246 1 0.935989 0.808178 0.122087
1060 1 0.996727 0.693281 0.0525292
19 1 0.55275 0.0101306 0.0630798
1139 1 0.560029 0.881001 0.0687701
1144 1 0.617333 0.942702 0.0612847
1266 1 0.556511 0.94669 0.125702
1269 1 0.627043 0.883457 0.129539
149 1 0.634386 1.00147 0.12928
465 1 0.500919 0.252458 0.372383
228 1 0.998434 0.432122 0.197799
436 1 0.500583 0.183218 0.447371
196 1 0.992803 0.304442 0.181699
1143 1 0.682637 0.883047 0.061056
1147 1 0.811009 0.884205 0.065622
1148 1 0.746187 0.944005 0.05555
1270 1 0.684868 0.937759 0.122822
1273 1 0.751239 0.880449 0.132295
1274 1 0.79847 0.941056 0.128616
1316 1 0.994273 0.69129 0.320265
31 1 0.934449 0.00437402 0.0627595
1151 1 0.934937 0.870965 0.0654663
1152 1 0.869443 0.942929 0.0693372
1277 1 0.878287 0.875461 0.124912
1278 1 0.940588 0.944914 0.127483
114 1 0.558883 0.434914 0.0010107
1558 1 0.689322 0.555443 0.498849
601 1 0.746249 0.252076 0.496962
1233 1 0.505742 0.740445 0.124161
1313 1 0.996165 0.627529 0.255813
152 1 0.626484 0.0630667 0.194179
179 1 0.563489 0.127164 0.198012
274 1 0.565429 0.0643482 0.257875
280 1 0.630942 0.065476 0.307979
307 1 0.565529 0.132293 0.304886
309 1 0.63315 0.123582 0.249564
1364 1 0.504059 0.813027 0.307527
147 1 0.555984 0.00725905 0.195284
630 1 0.694234 0.438401 0.496963
156 1 0.754513 0.0622323 0.193318
183 1 0.68612 0.13025 0.186694
187 1 0.809399 0.123658 0.187009
278 1 0.690218 0.0582849 0.257459
282 1 0.812155 0.0627025 0.248929
284 1 0.761664 0.0555587 0.312345
311 1 0.697675 0.123637 0.317849
313 1 0.74273 0.126041 0.245127
315 1 0.82381 0.119779 0.30889
155 1 0.81597 0.00263902 0.185479
308 1 0.503776 0.197016 0.314904
285 1 0.881557 0.00834046 0.252766
160 1 0.881313 0.0659471 0.193293
191 1 0.939526 0.120816 0.191551
286 1 0.944753 0.0650458 0.259256
288 1 0.88044 0.0611024 0.314166
317 1 0.881252 0.124408 0.247687
319 1 0.939071 0.119643 0.324433
159 1 0.937633 0.0083299 0.189404
287 1 0.933704 0.992222 0.316093
452 1 0.992346 0.30607 0.43644
570 1 0.814709 0.18761 0.499496
184 1 0.626934 0.187032 0.190966
211 1 0.562464 0.259126 0.18786
216 1 0.62188 0.312552 0.184148
306 1 0.569654 0.195814 0.246913
312 1 0.625228 0.190664 0.319857
338 1 0.565632 0.312346 0.250447
339 1 0.567398 0.254929 0.310737
341 1 0.625318 0.257232 0.253806
344 1 0.623279 0.316028 0.313509
385 1 0.997718 0.995455 0.373582
530 1 0.557246 0.0684699 0.491089
18 1 0.554409 0.0671832 0.00408847
188 1 0.751304 0.193126 0.187429
215 1 0.683506 0.251997 0.191041
219 1 0.820016 0.248164 0.182245
220 1 0.749181 0.303162 0.188361
310 1 0.686137 0.191128 0.256188
314 1 0.813844 0.177502 0.245413
316 1 0.748284 0.188544 0.313912
342 1 0.687583 0.311241 0.251757
343 1 0.680182 0.255586 0.321426
345 1 0.755442 0.243366 0.252325
346 1 0.815133 0.307778 0.253894
347 1 0.820727 0.244767 0.308983
348 1 0.746559 0.319191 0.309744
1618 1 0.561285 0.817569 0.495437
192 1 0.877469 0.181368 0.190275
223 1 0.934246 0.244411 0.186753
224 1 0.872018 0.308603 0.18613
318 1 0.9394 0.186193 0.250219
320 1 0.87913 0.183182 0.316999
349 1 0.877102 0.238435 0.248328
350 1 0.936934 0.308952 0.254086
351 1 0.942687 0.25002 0.318492
352 1 0.870599 0.315907 0.314802
305 1 0.502572 0.133429 0.253235
1489 1 0.501893 0.753706 0.374142
409 1 0.758233 0.00112369 0.375024
1329 1 0.504533 0.62619 0.246758
257 1 0.99653 0.995826 0.257795
243 1 0.563174 0.379764 0.195606
248 1 0.630437 0.443618 0.198467
370 1 0.559585 0.439733 0.252842
371 1 0.560294 0.373608 0.311217
373 1 0.62619 0.370888 0.253472
376 1 0.616913 0.431847 0.30942
566 1 0.688771 0.189215 0.496954
1171 1 0.564382 0.497805 0.193388
247 1 0.682932 0.373476 0.193257
251 1 0.818902 0.372314 0.176036
252 1 0.754078 0.440855 0.181397
374 1 0.693089 0.442107 0.254025
375 1 0.686812 0.374413 0.309146
377 1 0.759912 0.381769 0.245039
378 1 0.808252 0.44446 0.24487
379 1 0.815339 0.3753 0.311457
380 1 0.756548 0.439794 0.316422
1305 1 0.755188 0.499786 0.253844
598 1 0.692712 0.312638 0.493429
255 1 0.935375 0.370622 0.191127
256 1 0.870663 0.438243 0.182734
381 1 0.872053 0.369099 0.245815
382 1 0.937375 0.430371 0.247471
383 1 0.944152 0.376243 0.306566
384 1 0.876851 0.442336 0.308733
1309 1 0.877006 0.505575 0.249492
1393 1 0.507827 0.873911 0.242326
1142 1 0.682405 0.945027 0.00108158
574 1 0.935063 0.188652 0.495939
1176 1 0.631011 0.567549 0.187001
1203 1 0.567149 0.626812 0.194492
1298 1 0.562627 0.561276 0.251354
1304 1 0.625921 0.55587 0.316676
1331 1 0.559545 0.617665 0.313587
1333 1 0.619897 0.617814 0.253183
1299 1 0.559845 0.500499 0.311961
1301 1 0.628191 0.501292 0.257872
1179 1 0.811374 0.505471 0.19271
1303 1 0.687729 0.495992 0.317438
1307 1 0.819782 0.504713 0.319875
1180 1 0.751908 0.561711 0.189884
1207 1 0.692464 0.630565 0.18482
1211 1 0.813915 0.614659 0.194679
1302 1 0.693435 0.557176 0.248354
1306 1 0.816816 0.553215 0.256882
1308 1 0.757504 0.563177 0.309617
1335 1 0.679746 0.621428 0.307915
1337 1 0.748154 0.620297 0.255568
1339 1 0.817704 0.62072 0.313516
1175 1 0.692781 0.50153 0.189121
407 1 0.682413 0.994889 0.436221
1183 1 0.936779 0.495204 0.18923
1311 1 0.939115 0.499058 0.317551
1184 1 0.878781 0.560293 0.181358
1215 1 0.933916 0.619457 0.191274
1310 1 0.935284 0.559003 0.250732
1312 1 0.880811 0.560108 0.318344
1341 1 0.878183 0.623694 0.255083
1343 1 0.940795 0.622926 0.314761
1476 1 0.991918 0.813258 0.439466
260 1 0.995085 0.0583321 0.321193
1208 1 0.634355 0.694451 0.188554
1235 1 0.56654 0.749248 0.188291
1240 1 0.634167 0.81784 0.183254
1330 1 0.563623 0.690159 0.255608
1336 1 0.616104 0.685487 0.31154
1362 1 0.565163 0.815468 0.247979
1363 1 0.558247 0.74518 0.315122
1365 1 0.623657 0.75086 0.256714
1368 1 0.626762 0.819604 0.31614
401 1 0.503762 0.00341344 0.370181
1212 1 0.754972 0.683377 0.188335
1239 1 0.692512 0.756682 0.194277
1243 1 0.815847 0.751122 0.184974
1244 1 0.751721 0.816956 0.19433
1334 1 0.690739 0.682939 0.251808
1338 1 0.822502 0.685765 0.253299
1340 1 0.756387 0.687908 0.314006
1366 1 0.689001 0.810598 0.254133
1367 1 0.685612 0.740133 0.318564
1369 1 0.755692 0.751966 0.247436
1370 1 0.808379 0.812196 0.260058
1371 1 0.810794 0.754499 0.320812
1372 1 0.746086 0.808172 0.319943
497 1 0.506718 0.372601 0.373456
1380 1 0.998063 0.931256 0.316885
1216 1 0.878039 0.690635 0.190423
1247 1 0.934958 0.746871 0.187504
1248 1 0.88073 0.81675 0.180875
1342 1 0.935916 0.688382 0.251981
1344 1 0.87205 0.691879 0.313117
1373 1 0.869855 0.753321 0.252059
1374 1 0.933815 0.806474 0.243076
1375 1 0.925871 0.748754 0.312155
1376 1 0.874342 0.816118 0.314207
1531 1 0.800069 0.882095 0.436705
1188 1 0.996314 0.690246 0.192862
275 1 0.564657 0.00403318 0.313741
1267 1 0.567365 0.878238 0.18565
1272 1 0.624333 0.941566 0.187328
1394 1 0.563633 0.939116 0.24589
1395 1 0.563209 0.879587 0.31509
1397 1 0.625072 0.881023 0.256576
1400 1 0.625356 0.934702 0.313615
277 1 0.62503 0.00445727 0.246262
1348 1 0.997612 0.813073 0.315547
369 1 0.502164 0.376419 0.245039
337 1 0.501905 0.250293 0.249251
283 1 0.821651 0.998091 0.316958
281 1 0.761533 0.997049 0.248765
151 1 0.691816 0.0051821 0.193925
1271 1 0.692619 0.875072 0.189874
1275 1 0.812192 0.879381 0.184461
1276 1 0.747869 0.943877 0.190787
1398 1 0.678858 0.939978 0.250921
1399 1 0.693628 0.876915 0.308361
1401 1 0.746459 0.884919 0.24779
1402 1 0.821903 0.942333 0.250886
1403 1 0.8155 0.873418 0.306844
1404 1 0.757067 0.938359 0.310353
279 1 0.695169 0.997064 0.312819
180 1 0.50068 0.186067 0.195612
1204 1 0.506375 0.68241 0.189067
1279 1 0.936272 0.875546 0.18211
1280 1 0.870772 0.941208 0.185156
1405 1 0.878088 0.879088 0.245155
1406 1 0.936501 0.934935 0.251951
1407 1 0.933142 0.87724 0.308966
1408 1 0.871977 0.934413 0.320082
1529 1 0.755164 0.878022 0.37015
403 1 0.563934 -0.00147318 0.438432
402 1 0.56557 0.0631387 0.377908
408 1 0.631185 0.0666954 0.439227
435 1 0.560039 0.130153 0.439721
437 1 0.619449 0.12401 0.370547
405 1 0.62361 0.00258448 0.366568
420 1 1.0006 0.188523 0.440329
406 1 0.686935 0.0667628 0.379802
410 1 0.817882 0.0660507 0.384344
412 1 0.755219 0.0630135 0.438527
439 1 0.689116 0.13181 0.432785
441 1 0.75267 0.128648 0.374246
443 1 0.814953 0.122553 0.447375
1532 1 0.755893 0.951467 0.43696
1561 1 0.753462 0.501769 0.495677
414 1 0.936914 0.0618531 0.385325
416 1 0.875155 0.0633061 0.439216
445 1 0.873332 0.12965 0.382321
447 1 0.937134 0.128593 0.434416
597 1 0.628514 0.251111 0.49811
1527 1 0.695249 0.883045 0.437503
1345 1 0.991752 0.753637 0.25144
467 1 0.562773 0.253219 0.435898
434 1 0.560642 0.195002 0.37835
469 1 0.622874 0.250042 0.379796
472 1 0.62859 0.312167 0.437169
466 1 0.558852 0.307656 0.371578
440 1 0.628325 0.182979 0.435138
569 1 0.750487 0.130131 0.499183
1300 1 0.500669 0.559454 0.306838
471 1 0.695334 0.254153 0.426677
444 1 0.750098 0.189023 0.430644
474 1 0.812284 0.312859 0.376303
442 1 0.812314 0.186096 0.370318
473 1 0.751581 0.257224 0.370798
476 1 0.754551 0.322331 0.436277
438 1 0.684047 0.187057 0.371974
470 1 0.69168 0.321567 0.372003
475 1 0.806459 0.247933 0.431306
1217 1 1.0013 0.747692 0.121284
1653 1 0.637469 0.878141 0.500739
1530 1 0.817159 0.936241 0.378073
1526 1 0.683188 0.938528 0.375696
1460 1 0.500646 0.686921 0.444153
1525 1 0.62642 0.874088 0.381726
594 1 0.567038 0.31663 0.494032
480 1 0.875359 0.304632 0.442115
478 1 0.932386 0.314368 0.373801
477 1 0.875142 0.24853 0.37133
479 1 0.933995 0.243304 0.438313
446 1 0.934892 0.187034 0.380366
448 1 0.872462 0.185946 0.443466
29 1 0.871998 0.998532 0.0017033
1522 1 0.560778 0.939556 0.370491
504 1 0.630263 0.436106 0.431834
498 1 0.560438 0.439064 0.372196
501 1 0.627952 0.371666 0.370331
499 1 0.562502 0.384678 0.434373
68 1 0.997271 0.310988 0.0565089
1156 1 0.99425 0.564543 0.185879
356 1 0.997174 0.43879 0.307571
1523 1 0.559661 0.884132 0.43509
1650 1 0.55997 0.930457 0.497021
502 1 0.691823 0.43736 0.375722
506 1 0.816028 0.435392 0.376541
505 1 0.755314 0.378451 0.369933
503 1 0.695635 0.376412 0.438173
508 1 0.748162 0.434223 0.431893
507 1 0.816746 0.375948 0.436102
1431 1 0.692462 0.504115 0.433061
1433 1 0.755932 0.501703 0.370053
1092 1 1 0.815333 0.0627421
512 1 0.874823 0.438321 0.433819
511 1 0.932123 0.372798 0.432118
510 1 0.938622 0.43911 0.375534
509 1 0.872735 0.375089 0.373121
1439 1 0.937341 0.497198 0.437601
1409 1 0.997047 0.501987 0.373321
1470 1 0.941189 0.683663 0.380853
1429 1 0.626796 0.499472 0.378523
1461 1 0.620751 0.620064 0.369016
1426 1 0.557128 0.556192 0.372117
1432 1 0.624185 0.560031 0.433499
1459 1 0.56215 0.624021 0.442178
1427 1 0.55865 0.497658 0.434248
1501 1 0.877964 0.743732 0.382746
1504 1 0.871305 0.801898 0.44496
1435 1 0.821832 0.503892 0.437302
1430 1 0.689798 0.564402 0.370803
1463 1 0.685878 0.625125 0.435033
1436 1 0.751306 0.562948 0.432784
1434 1 0.818731 0.56313 0.375379
1465 1 0.75698 0.621566 0.373582
1467 1 0.814375 0.62625 0.443659
1503 1 0.932095 0.739723 0.441745
1437 1 0.880035 0.499453 0.377479
1440 1 0.878337 0.56605 0.441919
1469 1 0.88743 0.620392 0.381141
1471 1 0.93955 0.617256 0.441738
1438 1 0.944016 0.561677 0.380803
1626 1 0.808341 0.804429 0.495426
177 1 0.503759 0.13189 0.132604
411 1 0.819386 0.998708 0.44039
1458 1 0.554138 0.676665 0.374637
1490 1 0.564346 0.814778 0.374335
1496 1 0.622818 0.810445 0.439118
1491 1 0.561619 0.747867 0.437116
1493 1 0.62001 0.756041 0.369445
1464 1 0.627064 0.683875 0.432015
1494 1 0.684663 0.806685 0.378815
1502 1 0.931836 0.805533 0.37337
1472 1 0.876101 0.679363 0.438305
1528 1 0.621669 0.935497 0.442366
1495 1 0.684944 0.738116 0.438519
1498 1 0.813862 0.814629 0.376972
1462 1 0.689243 0.678755 0.37101
1468 1 0.748955 0.685393 0.44263
1500 1 0.751261 0.810172 0.43258
1497 1 0.748322 0.741341 0.376848
1466 1 0.819746 0.679043 0.371652
1499 1 0.812895 0.737261 0.438532
484 1 0.991057 0.439154 0.441714
1297 1 0.500919 0.495322 0.25045
292 1 0.991114 0.183902 0.321386
1117 1 0.875449 0.755366 0.00234754
1622 1 0.688931 0.814043 0.496022
638 1 0.933993 0.43165 0.498995
1585 1 0.502135 0.620818 0.496375
1557 1 0.625039 0.498023 0.49224
1621 1 0.626655 0.744917 0.496511
529 1 0.506395 0.993105 0.493265
113 1 0.498406 0.377871 0.00611899
626 1 0.561591 0.435389 0.495432
533 1 0.627424 0.00511218 0.497481
534 1 0.693142 0.0717999 0.495337
1630 1 0.930074 0.807155 0.498983
1110 1 0.698215 0.813072 0.00058474
30 1 0.934165 0.0661533 0.0030658
1073 1 0.505135 0.612804 0.00187212
542 1 0.9401 0.0571921 0.495933
573 1 0.881147 0.123118 0.498567
26 1 0.816237 0.0595393 0.0048998
25 1 0.747324 0.00271601 0.00154262
1661 1 0.86023 0.870311 0.497183
1141 1 0.625925 0.876984 0.0060009
536 1 0.625936 0.0665007 0.559719
563 1 0.561363 0.131202 0.561483
658 1 0.5655 0.0632903 0.622549
693 1 0.623986 0.1254 0.621285
661 1 0.631129 0.993962 0.620751
1652 1 0.503811 0.936598 0.5686
927 1 0.93697 0.010106 0.9418
2045 1 0.881599 0.879269 0.873197
1597 1 0.870248 0.618111 0.504505
540 1 0.751828 0.0635681 0.562761
567 1 0.68968 0.136899 0.561133
571 1 0.81449 0.129093 0.554005
662 1 0.688228 0.07153 0.624238
666 1 0.807144 0.0614012 0.621988
697 1 0.750795 0.130761 0.614456
535 1 0.683349 0.00977208 0.555591
1937 1 0.505995 0.499087 0.870334
21 1 0.624015 0.0111931 0.998211
740 1 0.998751 0.434461 0.691248
544 1 0.877108 0.0690291 0.562355
575 1 0.942126 0.129901 0.555099
670 1 0.940854 0.0598247 0.620014
701 1 0.874201 0.126024 0.624188
669 1 0.875487 0.00880054 0.621415
1590 1 0.68914 0.679517 0.500556
568 1 0.620156 0.188117 0.565352
595 1 0.568464 0.255061 0.556291
600 1 0.627756 0.316894 0.557746
690 1 0.564057 0.193823 0.626957
722 1 0.553967 0.311176 0.624202
725 1 0.631112 0.249125 0.621056
1953 1 0.994028 0.624673 0.872984
1566 1 0.949599 0.557326 0.503925
1625 1 0.742712 0.750248 0.50457
1562 1 0.809516 0.557481 0.501654
548 1 0.99922 0.188925 0.558225
572 1 0.755965 0.195792 0.55402
599 1 0.689424 0.248384 0.559552
603 1 0.816383 0.252218 0.558494
604 1 0.753254 0.309808 0.561115
694 1 0.690993 0.193555 0.625549
698 1 0.814358 0.18573 0.615992
726 1 0.689068 0.309161 0.622094
729 1 0.754975 0.250138 0.61548
730 1 0.815948 0.309985 0.627632
1589 1 0.62842 0.617404 0.50155
576 1 0.881852 0.186545 0.561722
607 1 0.945641 0.248484 0.556964
608 1 0.889148 0.314412 0.565135
702 1 0.940253 0.182949 0.61343
733 1 0.879331 0.2537 0.622355
734 1 0.945246 0.312419 0.628755
945 1 0.499579 0.129707 0.877784
596 1 0.497797 0.31268 0.562914
593 1 0.50044 0.253193 0.502763
1555 1 0.564666 0.507585 0.56214
627 1 0.564275 0.373318 0.562022
632 1 0.625174 0.430721 0.55782
754 1 0.560621 0.442758 0.623824
757 1 0.627963 0.37357 0.622968
2020 1 0.995389 0.940582 0.936305
1658 1 0.808401 0.938281 0.506601
1924 1 0.998006 0.563148 0.940104
631 1 0.690289 0.369504 0.549885
635 1 0.818341 0.375928 0.574217
636 1 0.74662 0.433504 0.559092
758 1 0.678591 0.44188 0.624814
761 1 0.743436 0.377362 0.623271
762 1 0.809009 0.444173 0.623304
1563 1 0.819012 0.503155 0.565724
1689 1 0.753227 0.504163 0.626264
1559 1 0.687282 0.501295 0.561515
977 1 0.50425 0.246591 0.875752
639 1 0.945277 0.377774 0.561195
640 1 0.878796 0.437579 0.56649
765 1 0.876066 0.369869 0.638179
766 1 0.935584 0.431071 0.629022
1567 1 0.933928 0.495918 0.557292
1693 1 0.87972 0.50299 0.626964
1713 1 0.508305 0.61998 0.631209
121 1 0.75365 0.371292 0.998445
1685 1 0.625628 0.505327 0.62956
1560 1 0.628687 0.561181 0.561449
1587 1 0.561405 0.620866 0.559235
1682 1 0.566953 0.567409 0.630367
1717 1 0.620385 0.632704 0.639691
2046 1 0.938055 0.946312 0.881474
1716 1 0.504081 0.690316 0.692095
1564 1 0.754286 0.559158 0.562178
1591 1 0.689432 0.617791 0.56827
1595 1 0.810937 0.619302 0.567219
1686 1 0.688899 0.565965 0.629357
1690 1 0.819585 0.570386 0.631319
1721 1 0.748262 0.628764 0.628165
1876 1 0.499692 0.811771 0.805783
89 1 0.75086 0.24881 0.991812
1780 1 0.500945 0.933381 0.684805
1985 1 0.992301 0.743776 0.880159
1568 1 0.876737 0.565559 0.57043
1599 1 0.939195 0.619859 0.56377
1694 1 0.941819 0.558205 0.632022
1725 1 0.88014 0.625559 0.632252
753 1 0.509237 0.376557 0.619454
916 1 0.504249 0.0712712 0.937216
913 1 0.504602 0.0054716 0.872285
1592 1 0.626548 0.686409 0.560883
1619 1 0.553539 0.755075 0.559603
1624 1 0.622085 0.814803 0.56637
1714 1 0.564185 0.69591 0.626227
1746 1 0.56282 0.807878 0.623986
1749 1 0.628535 0.747332 0.63216
2047 1 0.938769 0.872503 0.940177
1137 1 0.509696 0.88142 1.00021
562 1 0.56596 0.191506 0.500419
1596 1 0.745201 0.683648 0.56377
1623 1 0.684943 0.748695 0.56193
1627 1 0.805496 0.752971 0.561531
1628 1 0.750711 0.812016 0.562939
1718 1 0.689558 0.692668 0.624208
1722 1 0.814592 0.690501 0.617633
1750 1 0.693739 0.810476 0.626418
1753 1 0.75203 0.754318 0.621587
1754 1 0.808673 0.816243 0.622396
65 1 0.995871 0.253523 0.997836
1600 1 0.880772 0.683651 0.568463
1631 1 0.93544 0.751606 0.562879
1632 1 0.871312 0.814846 0.557771
1726 1 0.934618 0.688676 0.628769
1757 1 0.873145 0.755847 0.622356
1758 1 0.936401 0.81593 0.616956
1629 1 0.870633 0.738304 0.500065
531 1 0.562984 0.00354613 0.55515
1651 1 0.563428 0.875236 0.556896
1656 1 0.621731 0.936207 0.559365
1778 1 0.563519 0.937632 0.627172
1781 1 0.623102 0.874411 0.623452
62 1 0.927724 0.191202 0.998293
1777 1 0.501732 0.870141 0.621568
539 1 0.811777 -6.838e-05 0.568121
1655 1 0.68798 0.870309 0.567674
1659 1 0.807495 0.881242 0.56946
1660 1 0.740551 0.952089 0.565365
1782 1 0.678988 0.933564 0.623863
1785 1 0.745742 0.881492 0.630429
1786 1 0.817153 0.940653 0.624509
665 1 0.740562 0.00621942 0.629171
94 1 0.934067 0.312428 0.998831
543 1 0.941352 0.997269 0.560527
1663 1 0.92657 0.878163 0.551752
1664 1 0.877368 0.936113 0.561235
1789 1 0.869443 0.878925 0.626859
1790 1 0.930409 0.934605 0.624598
61 1 0.866233 0.126404 0.994179
2004 1 0.509895 0.80944 0.926427
1684 1 0.500424 0.557604 0.68746
1844 1 0.507196 0.691418 0.804608
664 1 0.623048 0.0739922 0.690033
691 1 0.557817 0.129527 0.684036
786 1 0.565727 0.0686743 0.746483
792 1 0.630616 0.0612294 0.812678
819 1 0.567811 0.12914 0.812355
821 1 0.622453 0.128868 0.752212
787 1 0.561484 0.99123 0.80905
789 1 0.620942 0.00444563 0.746269
2048 1 0.873987 0.945581 0.93525
668 1 0.748761 0.0656692 0.684849
695 1 0.686518 0.127317 0.684963
699 1 0.803096 0.131419 0.682399
790 1 0.692993 0.0716656 0.7509
794 1 0.802803 0.0646308 0.743945
796 1 0.744778 0.0589769 0.8111
823 1 0.686242 0.130553 0.808182
825 1 0.74554 0.131507 0.753634
827 1 0.809459 0.123933 0.805822
791 1 0.677532 0.995988 0.809531
795 1 0.811444 0.995163 0.807976
1812 1 0.500506 0.562299 0.816273
2036 1 0.502681 0.942607 0.931813
1598 1 0.934131 0.685822 0.499963
799 1 0.937066 0.00370873 0.816175
672 1 0.880908 0.0695675 0.684007
703 1 0.93225 0.132629 0.685817
798 1 0.935024 0.068444 0.747799
800 1 0.872831 0.0588407 0.814172
829 1 0.880938 0.128897 0.748099
831 1 0.940024 0.126245 0.818468
671 1 0.935269 0.00203386 0.674556
797 1 0.870898 0.00646758 0.747284
1085 1 0.872692 0.622535 0.997587
1594 1 0.806226 0.686263 0.499236
696 1 0.62024 0.191045 0.68784
723 1 0.551457 0.255148 0.685576
728 1 0.618919 0.308834 0.680625
818 1 0.553534 0.189132 0.740041
824 1 0.625955 0.188677 0.811387
850 1 0.565336 0.30762 0.745328
851 1 0.558449 0.255676 0.810665
853 1 0.621047 0.244914 0.753631
856 1 0.622245 0.311033 0.811209
817 1 0.499236 0.126212 0.750469
700 1 0.750938 0.191715 0.684003
727 1 0.690994 0.255558 0.68569
731 1 0.809224 0.251703 0.684835
732 1 0.75161 0.309162 0.690231
822 1 0.682221 0.189088 0.739167
826 1 0.808529 0.190973 0.747885
828 1 0.74703 0.193526 0.813361
854 1 0.675714 0.312809 0.751662
855 1 0.685931 0.251469 0.805933
857 1 0.750379 0.251501 0.749887
858 1 0.818343 0.30655 0.747636
859 1 0.80384 0.25214 0.813585
860 1 0.745607 0.312886 0.808423
1121 1 0.999448 0.872719 0.999959
704 1 0.866684 0.196243 0.68541
735 1 0.949033 0.24274 0.682474
736 1 0.879707 0.303799 0.691045
830 1 0.946094 0.182402 0.745653
832 1 0.875952 0.183677 0.808483
861 1 0.879988 0.253557 0.757161
862 1 0.949165 0.308498 0.742198
863 1 0.9461 0.242346 0.810534
864 1 0.880082 0.31876 0.810652
1586 1 0.564428 0.692864 0.499711
1813 1 0.623531 0.491548 0.740469
755 1 0.563366 0.37914 0.685055
760 1 0.616006 0.438757 0.68162
882 1 0.563138 0.435566 0.743478
883 1 0.562365 0.366699 0.800843
885 1 0.621957 0.377271 0.743707
888 1 0.6193 0.426629 0.808105
1811 1 0.568652 0.495229 0.80571
1086 1 0.937677 0.681608 0.996795
1815 1 0.68285 0.49526 0.809447
759 1 0.683331 0.364617 0.68469
763 1 0.81129 0.379237 0.689014
764 1 0.753316 0.440682 0.691235
886 1 0.685375 0.430304 0.746431
887 1 0.69184 0.370519 0.814792
889 1 0.760938 0.376456 0.753789
890 1 0.820829 0.439921 0.75744
891 1 0.81017 0.377801 0.816205
892 1 0.738277 0.435793 0.809585
1817 1 0.752243 0.49522 0.757265
1695 1 0.939482 0.496639 0.693954
767 1 0.938618 0.378943 0.696694
768 1 0.873081 0.441786 0.691003
893 1 0.882151 0.375648 0.749435
894 1 0.941793 0.438818 0.747803
895 1 0.948622 0.374486 0.813194
896 1 0.880644 0.43344 0.817144
1821 1 0.878123 0.499601 0.756013
1683 1 0.563507 0.506105 0.686218
1688 1 0.630593 0.562616 0.70312
1715 1 0.564581 0.627326 0.696706
1810 1 0.560163 0.555459 0.745753
1816 1 0.619697 0.561999 0.804132
1843 1 0.55753 0.623958 0.809026
1845 1 0.630256 0.63035 0.751687
1691 1 0.809595 0.498152 0.69352
1819 1 0.807315 0.498697 0.814573
1687 1 0.690385 0.507818 0.684893
1692 1 0.758607 0.570545 0.698997
1719 1 0.694912 0.626776 0.691026
1723 1 0.817159 0.638734 0.683151
1814 1 0.694818 0.564877 0.758726
1818 1 0.821432 0.563036 0.750408
1820 1 0.758617 0.564612 0.811511
1847 1 0.691795 0.637172 0.807649
1849 1 0.75727 0.637065 0.745232
1851 1 0.819548 0.625851 0.802379
1593 1 0.75074 0.622688 0.505906
1823 1 0.942217 0.503052 0.816604
1696 1 0.87828 0.560902 0.695143
1727 1 0.938441 0.62456 0.688595
1822 1 0.941685 0.560066 0.757018
1824 1 0.885004 0.565025 0.815946
1853 1 0.878737 0.629258 0.746458
1855 1 0.940538 0.634618 0.811192
2038 1 0.691176 0.945431 0.876576
1720 1 0.630691 0.692365 0.694102
1747 1 0.559641 0.755724 0.689987
1752 1 0.627743 0.815384 0.680523
1842 1 0.572955 0.691167 0.749456
1848 1 0.634329 0.694291 0.8132
1874 1 0.565302 0.809723 0.75286
1875 1 0.565893 0.751222 0.816604
1877 1 0.632321 0.758056 0.745391
1880 1 0.63515 0.81552 0.817292
1724 1 0.756765 0.689891 0.685044
1751 1 0.694994 0.757292 0.689586
1755 1 0.811903 0.753489 0.68285
1756 1 0.761466 0.819847 0.690531
1846 1 0.696693 0.692217 0.744848
1850 1 0.819111 0.69091 0.750607
1852 1 0.761018 0.688455 0.81299
1878 1 0.692902 0.820008 0.750554
1879 1 0.701328 0.75188 0.804715
1881 1 0.759554 0.753203 0.745519
1882 1 0.813721 0.818022 0.761864
1883 1 0.813422 0.756026 0.813056
1884 1 0.750824 0.816752 0.812824
1700 1 0.995647 0.685185 0.692998
897 1 0.999772 0.00367949 0.876676
1601 1 0.994218 0.750757 0.501117
1728 1 0.875409 0.694653 0.68997
1759 1 0.939675 0.755036 0.684858
1760 1 0.87766 0.813067 0.683927
1854 1 0.942142 0.68791 0.747579
1856 1 0.882464 0.68677 0.807212
1885 1 0.877755 0.753936 0.747729
1886 1 0.940235 0.810966 0.745349
1887 1 0.936441 0.749593 0.808897
1888 1 0.881978 0.810117 0.806326
1109 1 0.631789 0.74921 0.998875
659 1 0.572501 0.00643844 0.68271
1779 1 0.559831 0.874671 0.687442
1784 1 0.620805 0.93576 0.683412
1906 1 0.555551 0.932522 0.752948
1907 1 0.558055 0.868745 0.805806
1909 1 0.622499 0.868106 0.745876
1912 1 0.620677 0.931861 0.809531
2042 1 0.812343 0.938745 0.875706
667 1 0.807994 0.00381242 0.682724
793 1 0.741849 0.00456894 0.740402
663 1 0.677919 0.00695365 0.685281
1783 1 0.688177 0.879891 0.685208
1787 1 0.823165 0.872634 0.688857
1788 1 0.758118 0.939405 0.690962
1910 1 0.674875 0.943914 0.74049
1911 1 0.689143 0.875026 0.810934
1913 1 0.753891 0.876911 0.749386
1914 1 0.814115 0.941384 0.746563
1915 1 0.816419 0.885454 0.813007
1916 1 0.74673 0.940738 0.809293
1809 1 0.501211 0.49279 0.750276
1791 1 0.939572 0.884687 0.687313
1792 1 0.878108 0.936049 0.69223
1917 1 0.882182 0.873694 0.751971
1918 1 0.938314 0.939776 0.757708
1919 1 0.938922 0.879155 0.819297
1920 1 0.878147 0.937284 0.813288
914 1 0.562167 0.0666896 0.871346
920 1 0.618664 0.0712888 0.932464
947 1 0.556803 0.13227 0.942697
949 1 0.634923 0.121109 0.870595
917 1 0.622221 0.00321902 0.875685
1074 1 0.558299 0.682783 0.994567
923 1 0.809796 0.998425 0.94283
918 1 0.686934 0.0562751 0.86703
922 1 0.81121 0.062483 0.871778
924 1 0.751308 0.0564471 0.9341
951 1 0.692691 0.122163 0.925005
953 1 0.746012 0.130592 0.863586
955 1 0.798235 0.129561 0.933273
921 1 0.753485 0.000726633 0.868118
1972 1 0.501286 0.692191 0.934071
1050 1 0.806828 0.555231 0.998861
925 1 0.869938 0.998208 0.875897
2041 1 0.747217 0.881776 0.868877
948 1 0.502392 0.192614 0.94237
926 1 0.934732 0.0648407 0.875538
928 1 0.869324 0.0659902 0.936809
957 1 0.871025 0.121209 0.878058
959 1 0.933723 0.122616 0.938679
628 1 0.509017 0.439107 0.561643
979 1 0.56123 0.251313 0.938979
981 1 0.622842 0.250549 0.872165
978 1 0.56689 0.312202 0.867782
984 1 0.628514 0.308819 0.939704
946 1 0.564994 0.186041 0.87223
952 1 0.621444 0.182822 0.934388
1078 1 0.688193 0.681105 0.997502
1654 1 0.691169 0.937052 0.500238
2035 1 0.575172 0.880727 0.93494
2040 1 0.63309 0.937262 0.93426
987 1 0.814627 0.242988 0.921983
956 1 0.743316 0.186909 0.934964
988 1 0.744956 0.308652 0.934837
986 1 0.804282 0.316837 0.870865
982 1 0.68733 0.313032 0.879455
954 1 0.814051 0.181396 0.865797
983 1 0.676002 0.242639 0.932333
985 1 0.742583 0.251022 0.871815
950 1 0.675331 0.191743 0.871937
1636 1 0.996561 0.934446 0.556533
1988 1 0.996107 0.805665 0.929493
57 1 0.748366 0.128384 0.997663
990 1 0.942123 0.315923 0.871958
960 1 0.875751 0.185787 0.931563
989 1 0.871802 0.256161 0.859737
991 1 0.935184 0.249045 0.94182
992 1 0.880054 0.308983 0.931404
958 1 0.937032 0.195815 0.880616
2044 1 0.745582 0.935829 0.941205
1149 1 0.876052 0.888386 0.999134
1939 1 0.560784 0.493763 0.937832
1013 1 0.623847 0.375099 0.870339
1011 1 0.561581 0.373328 0.934128
1016 1 0.618556 0.43451 0.935036
1010 1 0.554433 0.436447 0.876406
1941 1 0.616089 0.490469 0.873301
2034 1 0.570852 0.93938 0.881022
1082 1 0.814822 0.691645 0.995144
689 1 0.501355 0.134355 0.620294
1761 1 0.995815 0.872918 0.620911
1565 1 0.875679 0.502914 0.503074
1019 1 0.817533 0.38121 0.933781
1017 1 0.751589 0.383177 0.877411
1015 1 0.696334 0.379604 0.936116
1018 1 0.813752 0.441531 0.874011
1014 1 0.681391 0.431371 0.873453
1020 1 0.752575 0.436779 0.939749
1943 1 0.683762 0.494448 0.930308
125 1 0.877707 0.375843 0.99872
1024 1 0.875355 0.438425 0.940037
1023 1 0.945297 0.370534 0.935226
1022 1 0.935465 0.438566 0.876608
1021 1 0.874953 0.375541 0.873364
1949 1 0.87371 0.504554 0.870922
2043 1 0.808663 0.882729 0.932679
2039 1 0.679766 0.878115 0.947569
2033 1 0.506457 0.874273 0.876402
1938 1 0.572084 0.569949 0.868683
1944 1 0.624154 0.562179 0.938274
1971 1 0.56257 0.623883 0.93143
1973 1 0.629614 0.631015 0.87609
2014 1 0.935837 0.816978 0.870826
1114 1 0.809649 0.817959 0.997741
90 1 0.817236 0.312171 1.00048
1947 1 0.813976 0.495971 0.944112
1945 1 0.750642 0.502348 0.874378
1942 1 0.679359 0.553231 0.870623
1946 1 0.816763 0.563298 0.872754
1977 1 0.756285 0.634297 0.874115
1979 1 0.810241 0.62178 0.939942
1948 1 0.739719 0.566405 0.931254
1975 1 0.682498 0.628592 0.935274
1982 1 0.929933 0.688096 0.880593
1951 1 0.938604 0.501517 0.937251
1950 1 0.942223 0.563311 0.870735
1981 1 0.878263 0.624553 0.879847
1983 1 0.941066 0.624223 0.936922
1952 1 0.875369 0.56067 0.931353
2013 1 0.874431 0.751431 0.869894
919 1 0.681996 0.00416497 0.942885
2037 1 0.636966 0.882277 0.872345
2008 1 0.629785 0.814399 0.943419
2003 1 0.570059 0.754984 0.944915
1976 1 0.622549 0.685609 0.937418
1970 1 0.569113 0.686048 0.870432
2002 1 0.577231 0.82242 0.871059
2005 1 0.630106 0.758136 0.873701
2016 1 0.877541 0.812205 0.939379
1665 1 0.997696 0.489668 0.620596
1889 1 0.997608 0.873329 0.763131
1980 1 0.752177 0.683021 0.937607
2010 1 0.812168 0.824387 0.869362
2009 1 0.746434 0.752801 0.87945
1974 1 0.689272 0.694959 0.876956
2006 1 0.691746 0.823416 0.88027
1978 1 0.812494 0.69588 0.877063
2011 1 0.81283 0.762778 0.929035
2012 1 0.75145 0.821691 0.93504
2007 1 0.690822 0.756628 0.939885
915 1 0.559275 0.0145139 0.937295
2001 1 0.503529 0.748724 0.869792
634 1 0.813436 0.441314 0.503532
58 1 0.803604 0.185957 0.998859
2015 1 0.932364 0.751396 0.937982
1984 1 0.875103 0.692821 0.934914
1969 1 0.503095 0.633475 0.878087
93 1 0.870028 0.250818 0.998487
644 1 0.989593 0.0679489 0.689742
1681 1 0.498189 0.500717 0.624701
1046 1 0.681651 0.557939 0.997549
1729 1 0.995612 0.754336 0.620498
1113 1 0.750883 0.748866 0.993734
1732 1 0.999364 0.814426 0.683077
1081 1 0.744571 0.616237 0.995682
932 1 0.99714 0.186906 0.932287
657 1 0.509888 0.997074 0.631166
785 1 0.500594 0.0223017 0.752366
660 1 0.502346 0.0771608 0.683696
769 1 0.997971 0.00187752 0.753016
54 1 0.678292 0.178654 0.995518
1873 1 0.505505 0.754171 0.743856
1956 1 0.996502 0.689345 0.940894
1105 1 0.499344 0.753744 0.997299
22 1 0.686978 0.0739522 0.995187
1620 1 0.505144 0.819035 0.553823
1045 1 0.627784 0.49134 0.993472
1748 1 0.501876 0.813128 0.685371
126 1 0.937662 0.444283 0.99765
1604 1 0.99702 0.816837 0.560594
673 1 0.999641 0.122918 0.623432
1697 1 0.996739 0.631902 0.623836
1556 1 0.504309 0.561917 0.565136
1668 1 0.996923 0.5616 0.695217
1077 1 0.623689 0.62425 0.995406
637 1 0.877381 0.370271 0.50822
1053 1 0.875761 0.511196 0.995611
117 1 0.622404 0.372799 0.999102
85 1 0.619939 0.245189 0.997428
1054 1 0.933612 0.571044 0.996108
86 1 0.689086 0.320973 0.995047
1146 1 0.804815 0.933162 0.994001
537 1 0.75554 0.00886688 0.501094
1150 1 0.941777 0.941938 0.99672
1118 1 0.93893 0.81264 0.999883
1042 1 0.568849 0.550491 0.997138
1049 1 0.740298 0.502532 0.999055
541 1 0.877194 0.99981 0.507761
1138 1 0.566688 0.943352 0.990632
605 1 0.88383 0.248854 0.501792
1089 1 0.996475 0.754257 0.994671
633 1 0.762716 0.378367 0.502073
565 1 0.625778 0.127064 0.504723
606 1 0.942516 0.309551 0.502453
33 1 0.991285 0.136045 0.996338
538 1 0.81848 0.0606303 0.506133
1554 1 0.562857 0.563002 0.501278
602 1 0.816524 0.312591 0.506647
|
[
"ITEM: TIMESTEP\n1500\nITEM: NUMBER OF ATOMS\n2048\nITEM: BOX BOUNDS pp pp pp\n3.7179638144627702e-01 4.6828203618547612e+01\n3.7179638144627702e-01 4.6828203618547612e+01\n3.7179638144627702e-01 4.6828203618547612e+01\nITEM: ATOMS id type xs ys zs\n8 1 0.122506 0.0616676 0.0702085\n35 1 0.0673125 0.126372 0.0590109\n130 1 0.066038 0.0664378 0.128403\n165 1 0.131273 0.119179 0.132273\n3 1 0.0759593 0.994963 0.0580912\n133 1 0.130252 0.997937 0.135408\n20 1 0.490006 0.0658088 0.0693352\n449 1 0.00177477 0.247615 0.373689\n12 1 0.25666 0.0558656 0.0598576\n39 1 0.200559 0.126113 0.0712715\n43 1 0.306891 0.126318 0.0602848\n134 1 0.191536 0.0594373 0.123808\n138 1 0.307313 0.0650693 0.122444\n169 1 0.248513 0.123428 0.139504\n1649 1 0.49553 0.876242 0.5014\n1033 1 0.244958 0.496979 0.00455496\n1265 1 0.49861 0.883951 0.126467\n16 1 0.37903 0.0651308 0.0608996\n47 1 0.440855 0.128591 0.0705798\n142 1 0.431912 0.0666677 0.139264\n173 1 0.364353 0.122605 0.121545\n15 1 0.434522 0.00514585 0.0698344\n141 1 0.369629 0.00770925 0.12703\n42 1 0.308197 0.190027 0.00257871\n397 1 0.374937 0.991822 0.37703\n1220 1 0.00339772 0.805224 0.181201\n40 1 0.131754 0.185106 0.0691189\n67 1 0.0770215 0.244332 0.0608682\n72 1 0.130189 0.317184 0.0640843\n162 1 0.0685323 0.193806 0.123728\n194 1 0.0541531 0.307424 0.116713\n197 1 0.127741 0.249351 0.130472\n13 1 0.385449 0.00480166 0.00084798\n1236 1 0.494815 0.807684 0.185551\n399 1 0.442251 0.99691 0.438793\n1457 1 0.489891 0.619199 0.372224\n1577 1 0.254908 0.620826 0.496723\n404 1 0.50114 0.0578598 0.43443\n44 1 0.253758 0.195456 0.0686689\n71 1 0.187097 0.253985 0.0626851\n75 1 0.307258 0.252861 0.0633578\n76 1 0.240451 0.314502 0.0653119\n166 1 0.189914 0.183752 0.132939\n170 1 0.309328 0.189348 0.129023\n198 1 0.182471 0.319167 0.123029\n201 1 0.241093 0.257213 0.135788\n202 1 0.302909 0.307447 0.126464\n500 1 0.49646 0.441037 0.433168\n1546 1 0.311966 0.562394 0.49584\n48 1 0.367614 0.191715 0.0654884\n79 1 0.429687 0.247514 0.0699353\n80 1 0.37849 0.31252 0.0658664\n174 1 0.434668 0.184594 0.128757\n205 1 0.369058 0.246982 0.128743\n206 1 0.440055 0.309684 0.123238\n1518 1 0.437677 0.936612 0.369773\n1520 1 0.382322 0.935025 0.437416\n244 1 0.493123 0.43251 0.181315\n9 1 0.240378 0.987569 0.0125905\n99 1 0.0571992 0.374845 0.0571279\n104 1 0.115896 0.440157 0.0566418\n226 1 0.0529092 0.437446 0.127062\n229 1 0.113658 0.382261 0.120713\n1517 1 0.380158 0.876227 0.375905\n66 1 0.0694349 0.306826 8.01478e-05\n1519 1 0.440738 0.876592 0.435602\n289 1 0.0067298 0.127927 0.248886\n103 1 0.183569 0.37911 0.0632626\n107 1 0.304556 0.368928 0.0599431\n108 1 0.24758 0.43403 0.0680997\n230 1 0.184907 0.440628 0.122798\n233 1 0.247622 0.374682 0.126779\n234 1 0.305484 0.439598 0.131699\n1031 1 0.180425 0.500208 0.0595709\n1097 1 0.245977 0.747062 0.00656892\n14 1 0.442944 0.0699623 0.000723564\n1172 1 0.494855 0.548594 0.190751\n111 1 0.433877 0.376366 0.0632987\n112 1 0.372655 0.430348 0.0608133\n237 1 0.372039 0.377706 0.124139\n238 1 0.437158 0.437419 0.12482\n388 1 0.0033361 0.0587117 0.439143\n38 1 0.188293 0.185957 0.00163708\n417 1 0.00198284 0.125831 0.385084\n1027 1 0.0547332 0.511491 0.0684469\n1521 1 0.494549 0.872195 0.367588\n1032 1 0.125793 0.55813 0.0616619\n1059 1 0.069085 0.613502 0.0603673\n1154 1 0.060225 0.564065 0.13549\n1189 1 0.12007 0.623865 0.137432\n1157 1 0.119343 0.49181 0.123964\n1036 1 0.251455 0.55932 0.0657171\n1063 1 0.173034 0.630658 0.0766565\n1067 1 0.308758 0.624548 0.0658114\n1158 1 0.180413 0.554769 0.125694\n1162 1 0.31368 0.561652 0.122002\n1193 1 0.253549 0.634747 0.126577\n1035 1 0.311738 0.494827 0.0617163\n1161 1 0.245696 0.505242 0.129213\n1473 1 0.00857143 0.743087 0.381099\n1040 1 0.370854 0.555958 0.0601095\n1071 1 0.442268 0.62859 0.0608984\n1166 1 0.434265 0.562089 0.128014\n1197 1 0.377975 0.632047 0.122107\n1165 1 0.370212 0.490963 0.121175\n1281 1 0.00152018 0.501231 0.24923\n1039 1 0.431952 0.496487 0.06299\n6 1 0.193618 0.0605467 0.000470822\n1064 1 0.1284 0.698947 0.0597927\n1091 1 0.0568844 0.756779 0.0585327\n1096 1 0.115828 0.806048 0.0592113\n1186 1 0.0627154 0.682191 0.124165\n1218 1 0.0593727 0.812914 0.12507\n1221 1 0.11024 0.748004 0.124696\n1122 1 0.0602491 0.934098 -0.0022776\n324 1 0.000978361 0.31218 0.313079\n1542 1 0.18614 0.557972 0.499156\n1068 1 0.240089 0.682753 0.0589561\n1095 1 0.183876 0.758784 0.0653921\n1099 1 0.312829 0.748815 0.0615798\n1100 1 0.253558 0.81033 0.0578296\n1190 1 0.184582 0.695813 0.12044\n1194 1 0.31561 0.692831 0.133151\n1222 1 0.188256 0.803535 0.131422\n1225 1 0.247864 0.752113 0.129388\n1226 1 0.311365 0.810136 0.126462\n1072 1 0.37721 0.697796 0.0625984\n1103 1 0.44676 0.747004 0.061531\n1104 1 0.383133 0.813369 0.0624226\n1198 1 0.440946 0.689152 0.125403\n1229 1 0.368963 0.757683 0.125765\n1230 1 0.442762 0.804805 0.118703\n1377 1 -0.00177314 0.873845 0.248954\n164 1 0.00788548 0.18704 0.192822\n1123 1 0.0596043 0.8773 0.0593343\n1128 1 0.127809 0.936211 0.0582613\n1250 1 0.0627318 0.939063 0.127873\n1253 1 0.124922 0.868711 0.122509\n1129 1 0.248776 0.868679 0.00314125\n1130 1 0.302477 0.929354 0.00408463\n1634 1 0.0654849 0.931689 0.50017\n1070 1 0.452253 0.686062 0.00224698\n137 1 0.249367 0.995542 0.128421\n11 1 0.307689 0.994469 0.0623884\n1127 1 0.185281 0.856566 0.0716325\n1131 1 0.310694 0.874352 0.0674903\n1132 1 0.241675 0.930183 0.0687533\n1254 1 0.18657 0.939355 0.140843\n1257 1 0.245223 0.886175 0.13609\n1258 1 0.316532 0.937468 0.126062\n7 1 0.177269 0.9992 0.0672\n586 1 0.310712 0.318469 0.500931\n1553 1 0.495832 0.495574 0.496547\n1135 1 0.441545 0.885524 0.0666351\n1136 1 0.373041 0.936344 0.0604851\n1261 1 0.377842 0.872496 0.125044\n1262 1 0.434078 0.946021 0.124703\n1185 1 0.0017602 0.627138 0.112849\n136 1 0.121751 0.0562408 0.191549\n163 1 0.0614944 0.121292 0.190071\n258 1 0.064937 0.0581001 0.254968\n264 1 0.122228 0.0603273 0.318807\n291 1 0.0532977 0.121715 0.316977\n293 1 0.117611 0.117219 0.256496\n261 1 0.128534 0.998489 0.252125\n139 1 0.317556 0.995792 0.188194\n140 1 0.254961 0.0523218 0.185528\n167 1 0.186808 0.125108 0.19678\n171 1 0.311977 0.131227 0.197636\n262 1 0.186168 0.0697726 0.251697\n266 1 0.318512 0.063158 0.257481\n268 1 0.251521 0.0584419 0.319894\n295 1 0.186633 0.124789 0.315503\n297 1 0.253269 0.11602 0.252853\n299 1 0.311035 0.125172 0.319872\n267 1 0.315362 0.00453774 0.322065\n265 1 0.256798 0.00561573 0.259357\n144 1 0.369775 0.0678744 0.191577\n175 1 0.439925 0.131723 0.189697\n270 1 0.433988 0.0714637 0.246394\n272 1 0.369031 0.0613734 0.318422\n301 1 0.374826 0.126225 0.264325\n303 1 0.439594 0.135301 0.321385\n276 1 0.492552 0.0661693 0.305057\n46 1 0.436308 0.187126 0.012633\n273 1 0.495935 1.00043 0.25072\n168 1 0.121486 0.185393 0.18972\n195 1 0.0595153 0.254885 0.184446\n200 1 0.121982 0.311428 0.188403\n290 1 0.0669253 0.19048 0.255712\n296 1 0.122196 0.181085 0.313729\n322 1 0.0624558 0.311905 0.24819\n323 1 0.0583294 0.246891 0.311951\n325 1 0.124726 0.253254 0.248837\n328 1 0.117957 0.308657 0.317762\n172 1 0.256294 0.19269 0.18918\n199 1 0.18244 0.248419 0.191483\n203 1 0.316421 0.250163 0.189595\n204 1 0.245142 0.316827 0.190887\n294 1 0.188161 0.185123 0.252789\n298 1 0.31407 0.190325 0.253404\n300 1 0.245124 0.187418 0.311529\n326 1 0.181853 0.316846 0.251668\n327 1 0.186849 0.250834 0.306185\n329 1 0.242228 0.247093 0.245743\n330 1 0.302705 0.310879 0.247868\n331 1 0.299613 0.248171 0.312951\n332 1 0.246723 0.314994 0.304887\n176 1 0.37331 0.190886 0.191773\n207 1 0.440945 0.253744 0.184251\n208 1 0.375167 0.314646 0.183487\n302 1 0.43725 0.191855 0.252516\n304 1 0.376565 0.194188 0.318439\n333 1 0.376385 0.256958 0.247537\n334 1 0.438966 0.315377 0.247511\n335 1 0.439068 0.253467 0.315848\n336 1 0.376128 0.309705 0.311291\n1510 1 0.183177 0.942615 0.372092\n227 1 0.0550952 0.364663 0.189725\n232 1 0.115429 0.434788 0.187888\n354 1 0.0623164 0.428504 0.253391\n355 1 0.0578367 0.375884 0.319124\n357 1 0.125629 0.375835 0.252521\n360 1 0.120437 0.430766 0.315902\n1285 1 0.12908 0.497692 0.250474\n1283 1 0.0581211 0.494219 0.30848\n1515 1 0.312435 0.873624 0.437999\n1287 1 0.183195 0.490334 0.305911\n231 1 0.185019 0.38124 0.189628\n235 1 0.309138 0.377159 0.192638\n236 1 0.245192 0.440995 0.188607\n358 1 0.181359 0.432866 0.25278\n359 1 0.189525 0.376057 0.315277\n361 1 0.245932 0.380996 0.251914\n362 1 0.307711 0.440807 0.246856\n363 1 0.311112 0.367624 0.302259\n364 1 0.256423 0.427085 0.316448\n1289 1 0.242323 0.495227 0.24887\n239 1 0.440744 0.369249 0.187298\n240 1 0.373278 0.435997 0.186289\n365 1 0.378523 0.369329 0.249592\n366 1 0.436213 0.435501 0.244044\n367 1 0.440315 0.376135 0.312224\n368 1 0.367597 0.435858 0.309025\n1065 1 0.243312 0.624084 -1.90417e-05\n610 1 0.0617822 0.433071 0.492755\n1160 1 0.12368 0.5557 0.189803\n1187 1 0.0552874 0.625174 0.190524\n1282 1 0.0586902 0.557468 0.248031\n1288 1 0.119151 0.560481 0.304054\n1315 1 0.0667845 0.63566 0.306116\n1317 1 0.125878 0.622536 0.244913\n1155 1 0.0570289 0.494142 0.19144\n1163 1 0.302803 0.504381 0.194521\n1291 1 0.310345 0.494839 0.312951\n1159 1 0.183957 0.494138 0.183996\n1164 1 0.246811 0.564139 0.183834\n1191 1 0.18554 0.620404 0.188666\n1195 1 0.313597 0.623611 0.189085\n1286 1 0.181137 0.560319 0.247685\n1290 1 0.312749 0.562916 0.257968\n1292 1 0.241424 0.557052 0.306946\n1319 1 0.184272 0.621255 0.311063\n1321 1 0.249033 0.621724 0.242654\n1323 1 0.306114 0.626136 0.318211\n1428 1 0.501614 0.566466 0.436811\n1167 1 0.432644 0.495007 0.186922\n1168 1 0.37019 0.562688 0.181007\n1199 1 0.433306 0.627853 0.180565\n1294 1 0.430318 0.562414 0.240584\n1296 1 0.373964 0.555733 0.313619\n1325 1 0.365309 0.619481 0.257195\n1327 1 0.437269 0.625707 0.304254\n1293 1 0.370872 0.500165 0.253963\n1295 1 0.435971 0.500973 0.30753\n132 1 -0.00198531 0.0686882 0.1846\n1192 1 0.127921 0.690891 0.186996\n1219 1 0.0601923 0.745095 0.18482\n1224 1 0.122366 0.809431 0.184445\n1314 1 0.0548868 0.688485 0.248706\n1320 1 0.125793 0.685527 0.3066\n1346 1 0.0566146 0.806383 0.241237\n1347 1 0.0653959 0.746161 0.309319\n1349 1 0.120765 0.755915 0.250628\n1352 1 0.127772 0.81448 0.316563\n1511 1 0.186742 0.872299 0.43541\n1513 1 0.248981 0.871872 0.370855\n1196 1 0.246753 0.694394 0.186637\n1223 1 0.187916 0.752072 0.198206\n1227 1 0.318982 0.754774 0.193965\n1228 1 0.254863 0.810062 0.192887\n1318 1 0.184797 0.683156 0.254603\n1322 1 0.312537 0.688262 0.254464\n1324 1 0.247201 0.691125 0.313542\n1350 1 0.187049 0.817001 0.252557\n1351 1 0.192054 0.751492 0.314033\n1353 1 0.251007 0.752498 0.251508\n1354 1 0.323816 0.818495 0.240274\n1355 1 0.311308 0.754803 0.316231\n1356 1 0.250777 0.815188 0.31043\n1200 1 0.372915 0.679655 0.194125\n1231 1 0.440702 0.745631 0.184007\n1232 1 0.389365 0.811413 0.186673\n1326 1 0.445726 0.687151 0.244815\n1328 1 0.386337 0.70092 0.313403\n1357 1 0.376438 0.759177 0.256521\n1358 1 0.444029 0.810945 0.252344\n1359 1 0.443633 0.754368 0.31098\n1360 1 0.375904 0.820406 0.311756\n1617 1 0.498411 0.7518 0.496678\n1514 1 0.311424 0.929325 0.369378\n1252 1 0.000447115 0.942148 0.187364\n259 1 0.0608312 0.995676 0.320297\n1251 1 0.0585569 0.872086 0.187545\n1256 1 0.11454 0.937388 0.18959\n1378 1 0.0637809 0.937709 0.255673\n1379 1 0.0606781 0.870872 0.31552\n1381 1 0.113389 0.867572 0.253261\n1384 1 0.124924 0.939011 0.304377\n131 1 0.052896 0.00642105 0.190592\n1284 1 0.00490827 0.556127 0.317283\n263 1 0.187771 0.992939 0.306527\n135 1 0.188786 0.00264093 0.200688\n1255 1 0.181276 0.868011 0.190743\n1259 1 0.314173 0.871732 0.182527\n1260 1 0.250476 0.950839 0.199195\n1382 1 0.182528 0.925053 0.240822\n1383 1 0.175793 0.8761 0.314739\n1385 1 0.2471 0.872821 0.244705\n1386 1 0.314424 0.931986 0.244239\n1387 1 0.315784 0.869994 0.301753\n1388 1 0.252261 0.933621 0.300904\n143 1 0.423171 0.00373925 0.198018\n271 1 0.430727 0.00312778 0.303801\n1263 1 0.443103 0.874301 0.183329\n1264 1 0.380781 0.93062 0.181519\n1389 1 0.382923 0.874088 0.243496\n1390 1 0.435403 0.940486 0.248066\n1391 1 0.438719 0.878378 0.306702\n1392 1 0.376086 0.935363 0.311429\n269 1 0.36745 0.994712 0.254967\n386 1 0.0576023 0.0609346 0.374566\n392 1 0.122666 0.0653702 0.443146\n419 1 0.0594213 0.119725 0.443638\n421 1 0.117568 0.125878 0.372213\n1524 1 0.494468 0.93418 0.436814\n558 1 0.434113 0.192327 0.492927\n389 1 0.122876 0.996023 0.375035\n387 1 0.06108 0.997252 0.435671\n101 1 0.123726 0.374535 -0.000754328\n1268 1 0.49566 0.941389 0.185295\n393 1 0.245299 0.00200825 0.378844\n390 1 0.187557 0.0646447 0.379495\n394 1 0.312864 0.0631309 0.383544\n396 1 0.257571 0.0640053 0.442291\n423 1 0.173736 0.135372 0.443078\n425 1 0.248714 0.127633 0.37994\n427 1 0.317796 0.130245 0.443566\n391 1 0.187203 0.00545213 0.436906\n1642 1 0.31227 0.937761 0.492864\n613 1 0.126716 0.375002 0.498158\n1516 1 0.250089 0.932145 0.435518\n398 1 0.437746 0.0564424 0.373139\n400 1 0.382801 0.0629863 0.435846\n429 1 0.381006 0.124945 0.376057\n431 1 0.441308 0.120418 0.438564\n1332 1 0.492249 0.687943 0.308758\n1541 1 0.122581 0.494023 0.498834\n418 1 0.0597534 0.182245 0.375428\n424 1 0.112864 0.192746 0.438942\n450 1 0.0599641 0.311922 0.375529\n451 1 0.0689029 0.255789 0.437007\n453 1 0.125829 0.244035 0.367447\n456 1 0.122452 0.31811 0.42746\n4 1 0.000488162 0.0574654 0.0642114\n161 1 0.00273703 0.130191 0.120629\n1606 1 0.191511 0.815706 0.495888\n422 1 0.180404 0.184515 0.377158\n426 1 0.304086 0.188131 0.372248\n428 1 0.24317 0.193359 0.436099\n454 1 0.181966 0.309867 0.370806\n455 1 0.176691 0.248114 0.432926\n457 1 0.244633 0.246908 0.37067\n458 1 0.307583 0.310143 0.365408\n459 1 0.312182 0.248512 0.438926\n460 1 0.248969 0.310991 0.435353\n1508 1 0.00374302 0.934232 0.436172\n1169 1 0.495712 0.499928 0.126913\n49 1 0.497555 0.132778 0.00624142\n395 1 0.322222 0.000358097 0.438281\n1361 1 0.499014 0.748944 0.249997\n1044 1 0.492277 0.556064 0.059834\n430 1 0.444773 0.192342 0.378222\n432 1 0.377178 0.18742 0.438446\n461 1 0.377941 0.252309 0.378405\n462 1 0.446756 0.315737 0.374707\n463 1 0.439072 0.26039 0.44305\n464 1 0.371708 0.319879 0.431667\n1570 1 0.0567564 0.679202 0.502204\n1396 1 0.499125 0.934671 0.311865\n1573 1 0.121989 0.621842 0.493958\n1509 1 0.125654 0.877959 0.37613\n483 1 0.0664608 0.380924 0.434603\n488 1 0.124701 0.434777 0.437018\n485 1 0.127545 0.375577 0.374697\n482 1 0.0493675 0.437516 0.376229\n1413 1 0.121734 0.492461 0.375677\n1411 1 0.0649851 0.494912 0.43247\n1201 1 0.497372 0.616287 0.122607\n625 1 0.496586 0.375369 0.499124\n1417 1 0.249862 0.49747 0.367471\n1415 1 0.183514 0.485588 0.437906\n492 1 0.252687 0.441974 0.430185\n491 1 0.30843 0.376621 0.4421\n486 1 0.182366 0.437767 0.366812\n489 1 0.242541 0.370184 0.379759\n490 1 0.313311 0.436846 0.378488\n487 1 0.189332 0.370783 0.442451\n2 1 0.0542735 0.0658839 -0.000155184\n129 1 0.00600734 0.00123546 0.124306\n1507 1 0.0641452 0.867003 0.435044\n495 1 0.441513 0.379173 0.427413\n496 1 0.36742 0.437171 0.443355\n493 1 0.374641 0.382878 0.373214\n494 1 0.433673 0.436594 0.367697\n1609 1 0.248461 0.759446 0.500556\n1124 1 0.000331305 0.940167 0.0672741\n1456 1 0.38315 0.685702 0.435892\n340 1 0.498874 0.314523 0.31013\n1410 1 0.0658854 0.559294 0.370955\n1416 1 0.119619 0.564391 0.428709\n1443 1 0.0620913 0.631176 0.43298\n1445 1 0.122609 0.623221 0.37387\n84 1 0.492058 0.315184 0.0588292\n1069 1 0.366918 0.628907 0.00704809\n1419 1 0.307005 0.503527 0.431937\n1449 1 0.253734 0.627141 0.378376\n1451 1 0.315692 0.628689 0.437211\n1420 1 0.245872 0.557917 0.430298\n1447 1 0.183707 0.621689 0.441281\n1414 1 0.180321 0.558631 0.364678\n1418 1 0.30635 0.558493 0.371723\n1613 1 0.379831 0.745409 0.497706\n81 1 0.497141 0.248909 0.00243865\n77 1 0.376602 0.25531 0.00476921\n1421 1 0.375879 0.49704 0.3766\n1423 1 0.429451 0.49903 0.445518\n1422 1 0.432282 0.561135 0.379\n1455 1 0.437138 0.619746 0.440019\n1424 1 0.369706 0.564669 0.432747\n1453 1 0.37678 0.628075 0.376028\n1512 1 0.132889 0.931517 0.438687\n148 1 0.50219 0.0694575 0.192913\n1545 1 0.251679 0.500712 0.496265\n1487 1 0.435223 0.750202 0.433603\n1448 1 0.132944 0.685493 0.439709\n1474 1 0.0628031 0.809679 0.37407\n1480 1 0.125996 0.799455 0.435884\n1475 1 0.0636878 0.738299 0.441805\n1442 1 0.0663725 0.688521 0.368719\n1477 1 0.124662 0.74371 0.375654\n525 1 0.380351 0.991393 0.497936\n241 1 0.496368 0.371082 0.124036\n1454 1 0.437933 0.688471 0.374555\n1485 1 0.373054 0.749508 0.378959\n1446 1 0.185815 0.681205 0.374201\n1450 1 0.312188 0.683472 0.373473\n1479 1 0.191169 0.746955 0.431775\n1452 1 0.247804 0.689843 0.438324\n1483 1 0.306318 0.746349 0.440484\n1488 1 0.378313 0.823892 0.436313\n1481 1 0.251464 0.754776 0.379042\n1478 1 0.183118 0.813734 0.377355\n1484 1 0.251319 0.821039 0.431932\n1482 1 0.313435 0.817674 0.373421\n1486 1 0.438121 0.81326 0.378183\n1506 1 0.0624125 0.927338 0.379048\n1441 1 0.00389512 0.623802 0.373425\n1412 1 0.0131757 0.564446 0.430045\n145 1 0.492987 0.0112282 0.129952\n321 1 0.000565523 0.252079 0.244692\n1 1 0.00865976 0.995812 0.00468351\n1425 1 0.491648 0.501671 0.37406\n1153 1 0.000905819 0.501442 0.130671\n1569 1 -0.00128455 0.621441 0.500485\n1140 1 0.496599 0.948499 0.0689179\n1614 1 0.437755 0.81584 0.498494\n582 1 0.184416 0.314197 0.499812\n585 1 0.24543 0.250108 0.497465\n45 1 0.374859 0.132328 0.00582776\n17 1 0.497128 0.00132085 0.00505437\n517 1 0.129553 0.99141 0.497427\n1610 1 0.311808 0.8109 0.495786\n1605 1 0.120384 0.746076 0.497389\n1637 1 0.127739 0.870203 0.493341\n109 1 0.371719 0.367725 0.00662379\n1061 1 0.132574 0.626222 0.00242393\n1030 1 0.186734 0.556661 0.0019528\n1633 1 0.000801536 0.875898 0.498031\n522 1 0.324768 0.0589172 0.49427\n1041 1 0.497048 0.495732 0.00496832\n1026 1 0.0666779 0.55676 0.00223266\n1102 1 0.443861 0.820071 0.00473293\n557 1 0.378041 0.128788 0.496564\n105 1 0.246053 0.366605 0.00314357\n1125 1 0.119383 0.872352 0.00249215\n102 1 0.185112 0.440805 0.00281566\n589 1 0.374081 0.257641 0.495868\n41 1 0.24821 0.122643 0.00315032\n37 1 0.126494 0.116291 0.00230434\n1126 1 0.193605 0.921573 0.00167859\n1581 1 0.373014 0.622238 0.494282\n1058 1 0.0623897 0.691033 0.0052758\n578 1 0.0619631 0.311598 0.497751\n110 1 0.436149 0.435592 0.0063796\n520 1 0.124923 0.0556291 0.554462\n547 1 0.0604994 0.12359 0.557763\n642 1 0.0637322 0.0574714 0.628437\n677 1 0.121755 0.123025 0.617419\n515 1 0.0623535 0.995192 0.558417\n1025 1 0.00469795 0.498495 0.994574\n612 1 0.010753 0.43394 0.555281\n524 1 0.247746 0.0611719 0.567891\n551 1 0.184164 0.129713 0.558856\n555 1 0.317417 0.12056 0.571044\n646 1 0.178669 0.0585815 0.626539\n650 1 0.312033 0.0564887 0.623534\n681 1 0.239784 0.114625 0.626827\n788 1 0.499506 0.0718837 0.817912\n836 1 0.00450208 0.311116 0.816721\n528 1 0.371387 0.0587991 0.566119\n559 1 0.428294 0.132567 0.570178\n654 1 0.439431 0.0626788 0.622386\n685 1 0.37192 0.127083 0.632601\n1796 1 0.00243525 0.563668 0.813513\n97 1 0.0045896 0.371383 0.992587\n1905 1 0.501123 0.868693 0.74242\n708 1 0.00838476 0.315552 0.681261\n552 1 0.125756 0.184503 0.560086\n579 1 0.0662357 0.244243 0.564509\n584 1 0.121382 0.312841 0.557252\n674 1 0.0620141 0.177977 0.614357\n706 1 0.0681323 0.310433 0.619145\n709 1 0.140406 0.248225 0.624808\n556 1 0.261466 0.189955 0.562979\n583 1 0.187126 0.261111 0.563531\n587 1 0.312555 0.255041 0.55916\n588 1 0.249996 0.314025 0.556857\n678 1 0.196887 0.191328 0.620684\n682 1 0.314388 0.189736 0.627777\n710 1 0.194705 0.312117 0.631366\n713 1 0.25683 0.254175 0.620839\n714 1 0.305548 0.314063 0.625123\n724 1 0.495974 0.322321 0.674225\n1572 1 0.000895679 0.684597 0.561054\n560 1 0.372868 0.188152 0.564441\n591 1 0.430643 0.255017 0.559177\n592 1 0.368447 0.316962 0.568197\n686 1 0.429759 0.188756 0.627803\n717 1 0.372551 0.25233 0.623663\n718 1 0.437295 0.317373 0.617352\n1009 1 0.496294 0.374879 0.881047\n550 1 0.188155 0.193892 0.502455\n1538 1 0.0607017 0.563915 0.500091\n868 1 0.00565354 0.436079 0.81449\n611 1 0.063534 0.374628 0.56201\n616 1 0.12004 0.436953 0.558338\n738 1 0.0600086 0.43033 0.633245\n741 1 0.118688 0.374596 0.624547\n1669 1 0.113159 0.494634 0.630159\n1539 1 0.0634962 0.499918 0.561149\n1921 1 0.0046337 0.498147 0.878281\n676 1 0.00159967 0.183543 0.681211\n1578 1 0.305696 0.690452 0.503122\n546 1 0.0597136 0.18 0.500204\n996 1 0.00369753 0.436778 0.932029\n615 1 0.187123 0.369846 0.566851\n619 1 0.312393 0.379194 0.561566\n620 1 0.24622 0.433817 0.560487\n742 1 0.178837 0.434541 0.62467\n745 1 0.250381 0.384363 0.623945\n746 1 0.316182 0.437941 0.622084\n1543 1 0.185476 0.49757 0.565457\n1673 1 0.244296 0.505328 0.623166\n1547 1 0.302959 0.496765 0.560659\n1645 1 0.377799 0.880364 0.505803\n623 1 0.44327 0.386276 0.568742\n624 1 0.371226 0.435795 0.554118\n749 1 0.372895 0.369959 0.628842\n750 1 0.435864 0.44786 0.624457\n1544 1 0.121498 0.559187 0.564655\n1571 1 0.0555704 0.627255 0.567327\n1666 1 0.0569678 0.560015 0.628323\n1701 1 0.124771 0.623469 0.62751\n772 1 0.00463161 0.0607173 0.809457\n721 1 0.49771 0.243133 0.620551\n900 1 0.00721966 0.0626131 0.930153\n1548 1 0.240717 0.560404 0.559699\n1575 1 0.178794 0.6195 0.561515\n1579 1 0.308565 0.622254 0.563351\n1670 1 0.186072 0.566805 0.620655\n1674 1 0.306547 0.56086 0.619277\n1705 1 0.249667 0.624446 0.625393\n553 1 0.259707 0.122933 0.512839\n549 1 0.120082 0.123357 0.503028\n737 1 0.000978936 0.380909 0.618379\n514 1 0.0656852 0.0520956 0.502349\n1677 1 0.372813 0.494982 0.620175\n1552 1 0.368958 0.560934 0.56449\n1583 1 0.43675 0.618666 0.565969\n1678 1 0.437727 0.555952 0.6276\n1709 1 0.372471 0.620446 0.629888\n1551 1 0.435536 0.498853 0.564738\n1908 1 0.495978 0.935337 0.819276\n1764 1 6.6931e-05 0.939612 0.686437\n1576 1 0.122974 0.685621 0.560806\n1603 1 0.0579212 0.742935 0.567844\n1608 1 0.12507 0.812454 0.56416\n1698 1 0.0612873 0.690436 0.632028\n1730 1 0.0618984 0.812341 0.625845\n1733 1 0.125521 0.750705 0.625781\n1574 1 0.189512 0.692786 0.50063\n1825 1 0.00811385 0.629902 0.753802\n961 1 0.00318251 0.246248 0.87482\n1580 1 0.250094 0.684343 0.56448\n1607 1 0.1916 0.757647 0.55882\n1611 1 0.310407 0.750806 0.560008\n1612 1 0.247829 0.813418 0.563137\n1702 1 0.184033 0.6848 0.624215\n1706 1 0.319238 0.688645 0.628038\n1734 1 0.192087 0.809237 0.630585\n1737 1 0.246712 0.753833 0.621226\n1738 1 0.311211 0.80392 0.631476\n617 1 0.245294 0.375661 0.504116\n1584 1 0.371437 0.680195 0.553421\n1615 1 0.439301 0.761097 0.557889\n1616 1 0.370153 0.817635 0.564086\n1710 1 0.431948 0.689192 0.616306\n1741 1 0.376905 0.75202 0.624635\n1742 1 0.438886 0.81199 0.620665\n1857 1 0.00282567 0.753079 0.7539\n645 1 0.119408 0.997317 0.623684\n641 1 0.00166836 0.999928 0.624647\n1635 1 0.0550548 0.875732 0.562756\n1640 1 0.128935 0.941279 0.568341\n1762 1 0.0618252 0.933879 0.615468\n1765 1 0.124163 0.873649 0.621027\n801 1 0.00354311 0.123787 0.750813\n1646 1 0.437214 0.934803 0.506211\n649 1 0.242489 0.999093 0.627428\n1639 1 0.192768 0.869984 0.566475\n1643 1 0.311807 0.873912 0.559471\n1644 1 0.256832 0.935848 0.563105\n1766 1 0.1825 0.935753 0.632933\n1769 1 0.254507 0.870692 0.628596\n1770 1 0.303111 0.937436 0.631694\n523 1 0.313027 -0.0019627 0.561195\n519 1 0.191205 0.000725099 0.567202\n980 1 0.493824 0.303172 0.930255\n1537 1 0.00950806 0.495517 0.50263\n653 1 0.378709 0.00594827 0.625278\n1647 1 0.434831 0.878177 0.563832\n1648 1 0.366497 0.931729 0.571687\n1773 1 0.37547 0.875979 0.628512\n1774 1 0.44011 0.942897 0.626221\n527 1 0.440169 0.999582 0.560544\n865 1 0.0047214 0.377467 0.758197\n1134 1 0.447396 0.931496 0.996293\n1550 1 0.433345 0.561431 0.5024\n34 1 0.0675455 0.190976 1.00125\n648 1 0.116547 0.0598784 0.683535\n675 1 0.0568727 0.11985 0.690038\n770 1 0.0645233 0.0588351 0.752494\n776 1 0.125737 0.0608684 0.811774\n803 1 0.071654 0.117898 0.813605\n805 1 0.129137 0.129491 0.743254\n771 1 0.0700643 0.0027744 0.812916\n773 1 0.12465 1.00031 0.751681\n779 1 0.313743 1.00123 0.815745\n651 1 0.314962 0.00249789 0.687027\n652 1 0.243082 0.0573783 0.687914\n679 1 0.191138 0.125959 0.69081\n683 1 0.317307 0.126484 0.684458\n774 1 0.187008 0.0659859 0.751048\n778 1 0.306068 0.0618246 0.746868\n780 1 0.250068 0.0565402 0.815691\n807 1 0.182878 0.130117 0.81571\n809 1 0.251606 0.121516 0.747043\n811 1 0.319848 0.123701 0.811782\n775 1 0.188847 0.993296 0.814711\n783 1 0.429906 0.000709371 0.81743\n656 1 0.380338 0.0619117 0.684897\n687 1 0.435917 0.126621 0.685323\n782 1 0.435224 0.0706723 0.755222\n784 1 0.373023 0.0646255 0.81694\n813 1 0.370784 0.119425 0.749504\n815 1 0.433092 0.122738 0.808614\n781 1 0.383889 0.00317311 0.746333\n655 1 0.447778 0.00810396 0.688007\n581 1 0.125785 0.246435 0.500641\n804 1 0.0076123 0.188736 0.817547\n911 1 0.441374 0.00246793 0.937401\n2030 1 0.437202 0.943014 0.875599\n707 1 0.0641306 0.238988 0.679269\n680 1 0.123622 0.180398 0.683686\n712 1 0.12909 0.312038 0.680206\n802 1 0.0629758 0.181651 0.745897\n808 1 0.124094 0.189873 0.814975\n834 1 0.0610345 0.30899 0.750232\n835 1 0.063202 0.249145 0.815094\n837 1 0.119211 0.243629 0.740098\n840 1 0.126749 0.308894 0.808725\n564 1 0.499446 0.186011 0.551921\n2032 1 0.379015 0.941006 0.938169\n684 1 0.254853 0.18406 0.690585\n711 1 0.197638 0.247374 0.684059\n715 1 0.314591 0.258188 0.687201\n716 1 0.25237 0.312638 0.691024\n806 1 0.187792 0.188173 0.748032\n810 1 0.321504 0.186685 0.749211\n812 1 0.247732 0.176877 0.806564\n838 1 0.184097 0.310716 0.749773\n839 1 0.188375 0.245412 0.805599\n841 1 0.254271 0.239964 0.752227\n842 1 0.307519 0.31069 0.750891\n843 1 0.309901 0.240243 0.810733\n844 1 0.247644 0.307628 0.804847\n688 1 0.374157 0.190047 0.692102\n719 1 0.433756 0.259498 0.687906\n720 1 0.372325 0.315448 0.68716\n814 1 0.436329 0.19087 0.755477\n816 1 0.371407 0.185886 0.818964\n845 1 0.376591 0.249605 0.748061\n846 1 0.437673 0.311583 0.749885\n847 1 0.442259 0.251933 0.816121\n848 1 0.3714 0.304369 0.812639\n590 1 0.438631 0.319616 0.510579\n618 1 0.311086 0.439224 0.502007\n78 1 0.437202 0.311666 1.00058\n2029 1 0.379685 0.884449 0.883557\n739 1 0.0640574 0.374845 0.69516\n744 1 0.128467 0.432131 0.687804\n866 1 0.0649707 0.432278 0.755411\n867 1 0.0678166 0.375462 0.821345\n869 1 0.127227 0.372419 0.748502\n872 1 0.133104 0.433184 0.799569\n1797 1 0.133584 0.500497 0.749149\n1667 1 0.0568912 0.500691 0.690266\n98 1 0.065206 0.434275 0.996587\n964 1 0.00378105 0.302603 0.937252\n1675 1 0.314118 0.49946 0.685983\n743 1 0.19346 0.371395 0.684723\n747 1 0.309095 0.375768 0.689216\n748 1 0.24564 0.44284 0.686681\n870 1 0.194989 0.439275 0.749343\n871 1 0.188602 0.375193 0.816463\n873 1 0.245679 0.375565 0.748217\n874 1 0.305884 0.427694 0.747631\n875 1 0.311753 0.368978 0.818735\n876 1 0.251125 0.427429 0.813937\n1801 1 0.253797 0.500244 0.746659\n1799 1 0.193897 0.499019 0.812666\n1671 1 0.180967 0.504845 0.687068\n1679 1 0.435693 0.500465 0.691401\n751 1 0.431781 0.383303 0.684382\n752 1 0.37105 0.440607 0.684911\n877 1 0.380704 0.373856 0.754986\n878 1 0.440671 0.437499 0.751886\n879 1 0.438944 0.37113 0.811147\n880 1 0.372176 0.4324 0.811241\n1805 1 0.36823 0.49913 0.751888\n2031 1 0.447847 0.871554 0.933546\n1038 1 0.434098 0.562651 0.992221\n993 1 0.00828726 0.366921 0.878414\n1795 1 0.0691032 0.504531 0.813812\n1672 1 0.124217 0.564 0.683708\n1699 1 0.0603867 0.631087 0.692862\n1794 1 0.0620646 0.568791 0.755161\n1800 1 0.133602 0.561014 0.814513\n1827 1 0.0664889 0.627562 0.811994\n1829 1 0.123181 0.629518 0.749462\n2022 1 0.189831 0.932854 0.875866\n1803 1 0.312872 0.48881 0.808454\n1676 1 0.252927 0.558617 0.690042\n1703 1 0.186135 0.628887 0.690239\n1707 1 0.315171 0.620854 0.688865\n1798 1 0.185536 0.566677 0.741179\n1802 1 0.316038 0.572066 0.753603\n1804 1 0.24885 0.564104 0.807577\n1831 1 0.188954 0.625209 0.808746\n1833 1 0.252799 0.629424 0.747946\n1835 1 0.312511 0.627236 0.816508\n903 1 0.184344 0.991417 0.93458\n1745 1 0.500926 0.750956 0.626686\n1680 1 0.37395 0.556694 0.679858\n1711 1 0.440257 0.622482 0.686276\n1806 1 0.442031 0.562472 0.755932\n1808 1 0.381153 0.566413 0.813881\n1837 1 0.377088 0.623501 0.74375\n1839 1 0.440929 0.629738 0.815406\n1807 1 0.43775 0.494585 0.815903\n692 1 0.494208 0.188255 0.693631\n532 1 0.49922 0.0610442 0.55908\n881 1 0.497793 0.367158 0.743359\n1704 1 0.12216 0.6869 0.685803\n1731 1 0.0570404 0.750196 0.686905\n1736 1 0.117821 0.809091 0.687231\n1826 1 0.0611927 0.692667 0.751992\n1832 1 0.123932 0.692888 0.821267\n1858 1 0.0607171 0.809852 0.756956\n1859 1 0.0581403 0.743631 0.818032\n1861 1 0.12014 0.746408 0.748469\n1864 1 0.124062 0.811979 0.814871\n2025 1 0.253392 0.878875 0.871554\n1708 1 0.262075 0.697042 0.683685\n1735 1 0.187777 0.743714 0.683248\n1739 1 0.31879 0.750001 0.70369\n1740 1 0.256229 0.807109 0.692021\n1830 1 0.19295 0.694617 0.748977\n1834 1 0.312728 0.683388 0.755273\n1836 1 0.24233 0.685818 0.8178\n1862 1 0.189305 0.816272 0.752604\n1863 1 0.182327 0.750009 0.810914\n1865 1 0.251483 0.750287 0.752238\n1866 1 0.32285 0.813658 0.752798\n1867 1 0.311549 0.742612 0.809017\n1868 1 0.257581 0.815315 0.811184\n833 1 0.00892262 0.243458 0.7421\n2027 1 0.30894 0.878655 0.940086\n2023 1 0.189442 0.869709 0.933381\n2028 1 0.246204 0.934215 0.939124\n1712 1 0.374155 0.685275 0.688071\n1743 1 0.438579 0.746316 0.693177\n1744 1 0.385444 0.804752 0.689834\n1838 1 0.436777 0.688612 0.75661\n1840 1 0.37977 0.683278 0.820061\n1869 1 0.379613 0.74762 0.755116\n1870 1 0.43286 0.817583 0.752113\n1871 1 0.440007 0.754202 0.807917\n1872 1 0.379633 0.823995 0.816817\n1940 1 0.496594 0.556306 0.937749\n643 1 0.0565139 0.00225613 0.686257\n1763 1 0.0588473 0.871526 0.687968\n1768 1 0.113957 0.933395 0.687317\n1890 1 0.0598387 0.932314 0.749068\n1891 1 0.0609171 0.87529 0.822461\n1893 1 0.123003 0.873248 0.743391\n1896 1 0.129659 0.931651 0.810177\n2026 1 0.311238 0.934234 0.879467\n74 1 0.308314 0.304635 0.992235\n647 1 0.181802 0.000674345 0.690939\n1767 1 0.191159 0.875309 0.689749\n1771 1 0.312238 0.863657 0.687832\n1772 1 0.252416 0.935642 0.695503\n1894 1 0.1871 0.939005 0.745126\n1895 1 0.190711 0.870736 0.813751\n1897 1 0.256546 0.875562 0.751008\n1898 1 0.31042 0.944871 0.755496\n1899 1 0.317958 0.879314 0.813054\n1900 1 0.246814 0.936145 0.810343\n777 1 0.242614 0.00224255 0.747314\n1775 1 0.440827 0.874362 0.68646\n1776 1 0.370061 0.942519 0.688233\n1901 1 0.374552 0.882058 0.752214\n1902 1 0.441886 0.937939 0.739212\n1903 1 0.43976 0.878871 0.817677\n1904 1 0.375768 0.933629 0.815701\n516 1 0.00320962 0.063201 0.559979\n1588 1 0.500262 0.692164 0.569933\n5 1 0.13009 0.99786 0.999911\n901 1 0.125189 0.994827 0.873792\n898 1 0.0672325 0.0543554 0.872875\n904 1 0.126996 0.0555146 0.940502\n931 1 0.0566347 0.125311 0.945114\n933 1 0.123492 0.121388 0.874\n1093 1 0.119743 0.749582 0.992779\n905 1 0.251027 0.99351 0.869856\n907 1 0.31584 0.996273 0.942216\n902 1 0.189349 0.063715 0.87876\n906 1 0.309261 0.0534238 0.87543\n908 1 0.248229 0.048832 0.934397\n935 1 0.180695 0.117459 0.942499\n937 1 0.247877 0.118378 0.875666\n939 1 0.312406 0.116856 0.938733\n2018 1 0.0631743 0.944125 0.864334\n929 1 0.00418941 0.125731 0.870657\n1638 1 0.192963 0.938146 0.502185\n10 1 0.318405 0.0595571 0.997157\n909 1 0.371898 0.00089451 0.883736\n910 1 0.436305 0.0644349 0.880276\n912 1 0.383037 0.0611406 0.943255\n941 1 0.371964 0.113102 0.879893\n943 1 0.442177 0.132466 0.939216\n1892 1 0.00299163 0.93346 0.815288\n1997 1 0.373818 0.750485 0.865998\n1828 1 0.00207346 0.686141 0.81678\n2019 1 0.0583476 0.879385 0.941495\n930 1 0.0669812 0.184538 0.872989\n936 1 0.126926 0.176366 0.938311\n962 1 0.0653878 0.306603 0.879292\n963 1 0.0575519 0.242209 0.937223\n965 1 0.123149 0.245508 0.880006\n968 1 0.128388 0.310608 0.938263\n1968 1 0.381883 0.692195 0.949469\n934 1 0.188277 0.180035 0.885317\n938 1 0.297794 0.185455 0.876444\n940 1 0.244703 0.183863 0.943054\n966 1 0.194461 0.309478 0.870023\n967 1 0.183754 0.253593 0.93014\n969 1 0.246167 0.241546 0.86537\n970 1 0.304164 0.306054 0.876946\n971 1 0.309931 0.240662 0.935127\n972 1 0.249594 0.306122 0.934473\n1966 1 0.444902 0.691935 0.873199\n1540 1 0.00323066 0.554071 0.565957\n973 1 0.373015 0.253324 0.873415\n944 1 0.372624 0.185809 0.925027\n942 1 0.437274 0.185398 0.86953\n976 1 0.376103 0.310908 0.931693\n975 1 0.429569 0.244652 0.937537\n974 1 0.439273 0.312954 0.874881\n1998 1 0.444848 0.811929 0.870726\n705 1 0.00870281 0.245961 0.623818\n899 1 0.0663287 0.986788 0.934698\n884 1 0.499006 0.431028 0.810354\n1602 1 0.0616781 0.809763 0.502278\n1923 1 0.0638624 0.495706 0.932282\n1000 1 0.132043 0.440013 0.931247\n997 1 0.130011 0.379822 0.876214\n994 1 0.0723289 0.441156 0.867021\n995 1 0.0645517 0.369805 0.93911\n1925 1 0.131995 0.501484 0.876209\n820 1 0.49381 0.188819 0.810436\n1927 1 0.191929 0.500085 0.938263\n999 1 0.186432 0.374011 0.937754\n1001 1 0.249555 0.374864 0.876543\n1004 1 0.25257 0.430408 0.9389\n1002 1 0.314061 0.42734 0.877613\n998 1 0.191134 0.432669 0.876691\n1003 1 0.314038 0.366949 0.9347\n1841 1 0.494792 0.619205 0.743736\n849 1 0.494712 0.253452 0.749065\n1090 1 0.0602918 0.811138 0.994756\n2024 1 0.123515 0.926099 0.937113\n1935 1 0.43546 0.490243 0.944624\n1008 1 0.371993 0.434023 0.936911\n1006 1 0.44188 0.436038 0.870797\n1005 1 0.37795 0.367155 0.872954\n1007 1 0.441772 0.367777 0.944044\n1999 1 0.439182 0.752018 0.931954\n580 1 0.00567911 0.308421 0.567642\n577 1 0.00438244 0.251756 0.500306\n1860 1 0.000981196 0.808374 0.821249\n1994 1 0.320238 0.817529 0.876205\n106 1 0.311696 0.430368 1.00063\n1922 1 0.0556723 0.563408 0.871766\n1928 1 0.126639 0.564106 0.929915\n1955 1 0.0610327 0.621709 0.933466\n1957 1 0.125713 0.623407 0.86994\n756 1 0.496635 0.441678 0.689027\n1929 1 0.246224 0.503045 0.868108\n1931 1 0.305895 0.497996 0.946627\n1930 1 0.30539 0.560935 0.877268\n1926 1 0.188316 0.562731 0.868105\n1963 1 0.306504 0.620143 0.935507\n1961 1 0.24551 0.625988 0.873111\n1932 1 0.252502 0.561237 0.941662\n1959 1 0.186845 0.623698 0.937215\n1793 1 0.00603649 0.500631 0.752672\n70 1 0.184421 0.309154 0.994969\n1933 1 0.374209 0.489546 0.872417\n1965 1 0.38074 0.630962 0.888939\n1934 1 0.435296 0.556737 0.876158\n1936 1 0.373424 0.552228 0.936859\n1967 1 0.438973 0.631372 0.93651\n1012 1 0.495833 0.435023 0.94724\n1986 1 0.0596201 0.815035 0.880908\n1987 1 0.0638493 0.752185 0.934872\n1954 1 0.0612761 0.68173 0.880515\n1992 1 0.121604 0.814064 0.941098\n1989 1 0.123738 0.758159 0.877563\n1960 1 0.116129 0.682081 0.940457\n2021 1 0.122793 0.873977 0.874786\n614 1 0.187507 0.435709 0.502568\n2000 1 0.380814 0.81824 0.935294\n2017 1 0.00050907 0.877513 0.883059\n1057 1 0.00377007 0.626486 0.994926\n852 1 0.496837 0.313591 0.810744\n1995 1 0.31535 0.746847 0.93953\n1996 1 0.250359 0.812572 0.936142\n1991 1 0.193425 0.750707 0.936571\n1958 1 0.185951 0.684047 0.874964\n1964 1 0.248181 0.689901 0.936541\n1993 1 0.263014 0.756456 0.870635\n1962 1 0.313744 0.693583 0.878277\n1990 1 0.190851 0.80822 0.870121\n1101 1 0.384559 0.758294 0.997368\n513 1 -0.00111045 0.992988 0.499574\n1066 1 0.307024 0.685609 0.993024\n1062 1 0.177084 0.691159 0.99251\n1098 1 0.323614 0.815032 0.997971\n1094 1 0.181971 0.811653 0.997905\n1037 1 0.376163 0.493249 0.99865\n1641 1 0.250735 0.873304 0.501111\n69 1 0.130265 0.246058 0.994491\n621 1 0.371709 0.370175 0.50187\n521 1 0.251707 0.995653 0.504092\n73 1 0.242055 0.246469 0.996925\n1034 1 0.311827 0.559331 0.999834\n1029 1 0.123917 0.49861 0.991562\n526 1 0.439939 0.0631188 0.503977\n561 1 0.495782 0.122957 0.504919\n1133 1 0.387315 0.877543 0.995346\n1549 1 0.366773 0.501339 0.504379\n622 1 0.434394 0.443043 0.49994\n554 1 0.314208 0.195529 0.504445\n609 1 0.0111024 0.368201 0.501021\n518 1 0.192639 0.0548292 0.502502\n1582 1 0.439472 0.690157 0.503075\n24 1 0.627622 0.0681381 0.0563435\n51 1 0.556684 0.122808 0.0719578\n146 1 0.562692 0.0621769 0.12826\n181 1 0.619821 0.121244 0.130893\n118 1 0.683063 0.434645 0.00136704\n50 1 0.565869 0.188932 0.000369547\n1444 1 0.999922 0.679112 0.434039\n28 1 0.747135 0.0601459 0.0643591\n55 1 0.685429 0.125421 0.0608751\n59 1 0.814435 0.123674 0.0569612\n150 1 0.69328 0.0638606 0.127271\n154 1 0.807178 0.057109 0.124243\n185 1 0.748654 0.124846 0.121325\n27 1 0.806144 0.000962202 0.0660125\n52 1 0.503253 0.184291 0.0717217\n23 1 0.684138 0.00155861 0.0644732\n153 1 0.742802 -0.000387054 0.123203\n53 1 0.613956 0.12198 -0.000616362\n32 1 0.873272 0.0645786 0.0595555\n63 1 0.932729 0.127102 0.0582275\n158 1 0.940095 0.0663564 0.129047\n189 1 0.86569 0.121313 0.123415\n157 1 0.875451 0.00233995 0.126775\n1076 1 0.513593 0.677562 0.0666785\n1492 1 0.502483 0.815347 0.431075\n56 1 0.628488 0.194044 0.0648261\n83 1 0.559217 0.253047 0.0624927\n88 1 0.632629 0.311448 0.0638155\n178 1 0.565348 0.193292 0.120259\n210 1 0.559072 0.317427 0.125689\n213 1 0.630818 0.256417 0.125622\n1108 1 0.503021 0.81179 0.0657219\n1505 1 0.99877 0.863936 0.374087\n60 1 0.751716 0.184113 0.0626611\n87 1 0.693754 0.244416 0.0526849\n91 1 0.805937 0.244003 0.0655797\n92 1 0.743446 0.322585 0.0721552\n182 1 0.691 0.185323 0.125193\n186 1 0.8059 0.184271 0.126934\n214 1 0.681191 0.319503 0.128251\n217 1 0.747773 0.253701 0.122106\n218 1 0.814723 0.309506 0.122103\n1662 1 0.938598 0.934402 0.494125\n36 1 1.00051 0.19315 0.0626937\n629 1 0.628281 0.377812 0.486847\n64 1 0.872042 0.18656 0.0630743\n95 1 0.937006 0.247127 0.0597483\n96 1 0.879039 0.307676 0.0581863\n190 1 0.933497 0.179476 0.127762\n221 1 0.875028 0.242717 0.124201\n222 1 0.938525 0.311672 0.116911\n413 1 0.87658 0.997155 0.380084\n122 1 0.803945 0.438684 0.00733658\n545 1 0.998496 0.114513 0.499821\n115 1 0.560401 0.382597 0.0670006\n120 1 0.621705 0.444464 0.0608983\n242 1 0.554523 0.440111 0.128154\n245 1 0.623103 0.381694 0.131225\n1043 1 0.562552 0.497915 0.0677658\n1173 1 0.624313 0.499334 0.125097\n193 1 0.996762 0.239512 0.124821\n1533 1 0.878808 0.870947 0.378816\n1249 1 0.993139 0.874425 0.118636\n119 1 0.670992 0.379019 0.0707094\n123 1 0.810473 0.364243 0.0595529\n124 1 0.744981 0.428813 0.0655492\n246 1 0.683146 0.439058 0.126618\n249 1 0.73748 0.377042 0.135651\n250 1 0.810952 0.435808 0.123628\n1047 1 0.689235 0.497759 0.0679115\n1177 1 0.747087 0.505134 0.126938\n1051 1 0.80736 0.501653 0.0614774\n353 1 0.998888 0.369917 0.246729\n127 1 0.943308 0.375907 0.0548147\n128 1 0.874516 0.448557 0.0554534\n253 1 0.876129 0.376564 0.11286\n254 1 0.943215 0.446076 0.120437\n1534 1 0.938574 0.934341 0.376782\n225 1 0.988405 0.374653 0.12985\n372 1 0.504385 0.4323 0.310591\n209 1 0.498402 0.248649 0.126666\n1106 1 0.560992 0.813903 0.00410081\n1048 1 0.624031 0.565101 0.0622968\n1075 1 0.559313 0.610145 0.0586557\n1170 1 0.562662 0.561916 0.128661\n1205 1 0.621858 0.626538 0.121899\n433 1 0.499957 0.126738 0.379454\n1052 1 0.751161 0.568277 0.0626529\n1079 1 0.682847 0.622383 0.0565735\n1083 1 0.817879 0.623134 0.0626916\n1174 1 0.686277 0.560271 0.124873\n1178 1 0.816478 0.561092 0.126324\n1209 1 0.749949 0.629513 0.118241\n1028 1 0.991651 0.569455 0.0544827\n1657 1 0.750042 0.873808 0.502361\n82 1 0.559811 0.313526 0.000718582\n1181 1 0.875383 0.500481 0.123989\n1055 1 0.949661 0.504588 0.0634763\n1056 1 0.876186 0.556704 0.060739\n1087 1 0.938404 0.633458 0.0645937\n1182 1 0.935315 0.55791 0.127215\n1213 1 0.879789 0.62722 0.123789\n1145 1 0.742114 0.880996 0.00332669\n468 1 0.505221 0.318072 0.437839\n1535 1 0.932683 0.865063 0.441539\n1080 1 0.624308 0.686025 0.0557501\n1107 1 0.567379 0.749328 0.0614127\n1112 1 0.622178 0.817425 0.061743\n1202 1 0.570531 0.688386 0.128178\n1234 1 0.561717 0.816755 0.127123\n1237 1 0.629551 0.755391 0.115015\n481 1 0.995874 0.371001 0.376337\n1536 1 0.872266 0.931405 0.442544\n415 1 0.937717 0.996673 0.431473\n100 1 0.999278 0.436513 0.0588353\n212 1 0.500575 0.31694 0.195393\n1084 1 0.746268 0.687713 0.0552974\n1111 1 0.68869 0.752282 0.0605918\n1115 1 0.810489 0.74919 0.060539\n1116 1 0.752007 0.81278 0.060841\n1206 1 0.680605 0.682017 0.115746\n1210 1 0.813017 0.681128 0.122183\n1238 1 0.693719 0.818025 0.122206\n1241 1 0.745442 0.748444 0.130416\n1242 1 0.812826 0.818507 0.117566\n116 1 0.49877 0.437542 0.0605507\n1088 1 0.869059 0.689588 0.0545749\n1119 1 0.933309 0.749745 0.060944\n1120 1 0.874456 0.818791 0.0605453\n1214 1 0.939725 0.688928 0.126272\n1245 1 0.872211 0.74697 0.122224\n1246 1 0.935989 0.808178 0.122087\n1060 1 0.996727 0.693281 0.0525292\n19 1 0.55275 0.0101306 0.0630798\n1139 1 0.560029 0.881001 0.0687701\n1144 1 0.617333 0.942702 0.0612847\n1266 1 0.556511 0.94669 0.125702\n1269 1 0.627043 0.883457 0.129539\n149 1 0.634386 1.00147 0.12928\n465 1 0.500919 0.252458 0.372383\n228 1 0.998434 0.432122 0.197799\n436 1 0.500583 0.183218 0.447371\n196 1 0.992803 0.304442 0.181699\n1143 1 0.682637 0.883047 0.061056\n1147 1 0.811009 0.884205 0.065622\n1148 1 0.746187 0.944005 0.05555\n1270 1 0.684868 0.937759 0.122822\n1273 1 0.751239 0.880449 0.132295\n1274 1 0.79847 0.941056 0.128616\n1316 1 0.994273 0.69129 0.320265\n31 1 0.934449 0.00437402 0.0627595\n1151 1 0.934937 0.870965 0.0654663\n1152 1 0.869443 0.942929 0.0693372\n1277 1 0.878287 0.875461 0.124912\n1278 1 0.940588 0.944914 0.127483\n114 1 0.558883 0.434914 0.0010107\n1558 1 0.689322 0.555443 0.498849\n601 1 0.746249 0.252076 0.496962\n1233 1 0.505742 0.740445 0.124161\n1313 1 0.996165 0.627529 0.255813\n152 1 0.626484 0.0630667 0.194179\n179 1 0.563489 0.127164 0.198012\n274 1 0.565429 0.0643482 0.257875\n280 1 0.630942 0.065476 0.307979\n307 1 0.565529 0.132293 0.304886\n309 1 0.63315 0.123582 0.249564\n1364 1 0.504059 0.813027 0.307527\n147 1 0.555984 0.00725905 0.195284\n630 1 0.694234 0.438401 0.496963\n156 1 0.754513 0.0622323 0.193318\n183 1 0.68612 0.13025 0.186694\n187 1 0.809399 0.123658 0.187009\n278 1 0.690218 0.0582849 0.257459\n282 1 0.812155 0.0627025 0.248929\n284 1 0.761664 0.0555587 0.312345\n311 1 0.697675 0.123637 0.317849\n313 1 0.74273 0.126041 0.245127\n315 1 0.82381 0.119779 0.30889\n155 1 0.81597 0.00263902 0.185479\n308 1 0.503776 0.197016 0.314904\n285 1 0.881557 0.00834046 0.252766\n160 1 0.881313 0.0659471 0.193293\n191 1 0.939526 0.120816 0.191551\n286 1 0.944753 0.0650458 0.259256\n288 1 0.88044 0.0611024 0.314166\n317 1 0.881252 0.124408 0.247687\n319 1 0.939071 0.119643 0.324433\n159 1 0.937633 0.0083299 0.189404\n287 1 0.933704 0.992222 0.316093\n452 1 0.992346 0.30607 0.43644\n570 1 0.814709 0.18761 0.499496\n184 1 0.626934 0.187032 0.190966\n211 1 0.562464 0.259126 0.18786\n216 1 0.62188 0.312552 0.184148\n306 1 0.569654 0.195814 0.246913\n312 1 0.625228 0.190664 0.319857\n338 1 0.565632 0.312346 0.250447\n339 1 0.567398 0.254929 0.310737\n341 1 0.625318 0.257232 0.253806\n344 1 0.623279 0.316028 0.313509\n385 1 0.997718 0.995455 0.373582\n530 1 0.557246 0.0684699 0.491089\n18 1 0.554409 0.0671832 0.00408847\n188 1 0.751304 0.193126 0.187429\n215 1 0.683506 0.251997 0.191041\n219 1 0.820016 0.248164 0.182245\n220 1 0.749181 0.303162 0.188361\n310 1 0.686137 0.191128 0.256188\n314 1 0.813844 0.177502 0.245413\n316 1 0.748284 0.188544 0.313912\n342 1 0.687583 0.311241 0.251757\n343 1 0.680182 0.255586 0.321426\n345 1 0.755442 0.243366 0.252325\n346 1 0.815133 0.307778 0.253894\n347 1 0.820727 0.244767 0.308983\n348 1 0.746559 0.319191 0.309744\n1618 1 0.561285 0.817569 0.495437\n192 1 0.877469 0.181368 0.190275\n223 1 0.934246 0.244411 0.186753\n224 1 0.872018 0.308603 0.18613\n318 1 0.9394 0.186193 0.250219\n320 1 0.87913 0.183182 0.316999\n349 1 0.877102 0.238435 0.248328\n350 1 0.936934 0.308952 0.254086\n351 1 0.942687 0.25002 0.318492\n352 1 0.870599 0.315907 0.314802\n305 1 0.502572 0.133429 0.253235\n1489 1 0.501893 0.753706 0.374142\n409 1 0.758233 0.00112369 0.375024\n1329 1 0.504533 0.62619 0.246758\n257 1 0.99653 0.995826 0.257795\n243 1 0.563174 0.379764 0.195606\n248 1 0.630437 0.443618 0.198467\n370 1 0.559585 0.439733 0.252842\n371 1 0.560294 0.373608 0.311217\n373 1 0.62619 0.370888 0.253472\n376 1 0.616913 0.431847 0.30942\n566 1 0.688771 0.189215 0.496954\n1171 1 0.564382 0.497805 0.193388\n247 1 0.682932 0.373476 0.193257\n251 1 0.818902 0.372314 0.176036\n252 1 0.754078 0.440855 0.181397\n374 1 0.693089 0.442107 0.254025\n375 1 0.686812 0.374413 0.309146\n377 1 0.759912 0.381769 0.245039\n378 1 0.808252 0.44446 0.24487\n379 1 0.815339 0.3753 0.311457\n380 1 0.756548 0.439794 0.316422\n1305 1 0.755188 0.499786 0.253844\n598 1 0.692712 0.312638 0.493429\n255 1 0.935375 0.370622 0.191127\n256 1 0.870663 0.438243 0.182734\n381 1 0.872053 0.369099 0.245815\n382 1 0.937375 0.430371 0.247471\n383 1 0.944152 0.376243 0.306566\n384 1 0.876851 0.442336 0.308733\n1309 1 0.877006 0.505575 0.249492\n1393 1 0.507827 0.873911 0.242326\n1142 1 0.682405 0.945027 0.00108158\n574 1 0.935063 0.188652 0.495939\n1176 1 0.631011 0.567549 0.187001\n1203 1 0.567149 0.626812 0.194492\n1298 1 0.562627 0.561276 0.251354\n1304 1 0.625921 0.55587 0.316676\n1331 1 0.559545 0.617665 0.313587\n1333 1 0.619897 0.617814 0.253183\n1299 1 0.559845 0.500499 0.311961\n1301 1 0.628191 0.501292 0.257872\n1179 1 0.811374 0.505471 0.19271\n1303 1 0.687729 0.495992 0.317438\n1307 1 0.819782 0.504713 0.319875\n1180 1 0.751908 0.561711 0.189884\n1207 1 0.692464 0.630565 0.18482\n1211 1 0.813915 0.614659 0.194679\n1302 1 0.693435 0.557176 0.248354\n1306 1 0.816816 0.553215 0.256882\n1308 1 0.757504 0.563177 0.309617\n1335 1 0.679746 0.621428 0.307915\n1337 1 0.748154 0.620297 0.255568\n1339 1 0.817704 0.62072 0.313516\n1175 1 0.692781 0.50153 0.189121\n407 1 0.682413 0.994889 0.436221\n1183 1 0.936779 0.495204 0.18923\n1311 1 0.939115 0.499058 0.317551\n1184 1 0.878781 0.560293 0.181358\n1215 1 0.933916 0.619457 0.191274\n1310 1 0.935284 0.559003 0.250732\n1312 1 0.880811 0.560108 0.318344\n1341 1 0.878183 0.623694 0.255083\n1343 1 0.940795 0.622926 0.314761\n1476 1 0.991918 0.813258 0.439466\n260 1 0.995085 0.0583321 0.321193\n1208 1 0.634355 0.694451 0.188554\n1235 1 0.56654 0.749248 0.188291\n1240 1 0.634167 0.81784 0.183254\n1330 1 0.563623 0.690159 0.255608\n1336 1 0.616104 0.685487 0.31154\n1362 1 0.565163 0.815468 0.247979\n1363 1 0.558247 0.74518 0.315122\n1365 1 0.623657 0.75086 0.256714\n1368 1 0.626762 0.819604 0.31614\n401 1 0.503762 0.00341344 0.370181\n1212 1 0.754972 0.683377 0.188335\n1239 1 0.692512 0.756682 0.194277\n1243 1 0.815847 0.751122 0.184974\n1244 1 0.751721 0.816956 0.19433\n1334 1 0.690739 0.682939 0.251808\n1338 1 0.822502 0.685765 0.253299\n1340 1 0.756387 0.687908 0.314006\n1366 1 0.689001 0.810598 0.254133\n1367 1 0.685612 0.740133 0.318564\n1369 1 0.755692 0.751966 0.247436\n1370 1 0.808379 0.812196 0.260058\n1371 1 0.810794 0.754499 0.320812\n1372 1 0.746086 0.808172 0.319943\n497 1 0.506718 0.372601 0.373456\n1380 1 0.998063 0.931256 0.316885\n1216 1 0.878039 0.690635 0.190423\n1247 1 0.934958 0.746871 0.187504\n1248 1 0.88073 0.81675 0.180875\n1342 1 0.935916 0.688382 0.251981\n1344 1 0.87205 0.691879 0.313117\n1373 1 0.869855 0.753321 0.252059\n1374 1 0.933815 0.806474 0.243076\n1375 1 0.925871 0.748754 0.312155\n1376 1 0.874342 0.816118 0.314207\n1531 1 0.800069 0.882095 0.436705\n1188 1 0.996314 0.690246 0.192862\n275 1 0.564657 0.00403318 0.313741\n1267 1 0.567365 0.878238 0.18565\n1272 1 0.624333 0.941566 0.187328\n1394 1 0.563633 0.939116 0.24589\n1395 1 0.563209 0.879587 0.31509\n1397 1 0.625072 0.881023 0.256576\n1400 1 0.625356 0.934702 0.313615\n277 1 0.62503 0.00445727 0.246262\n1348 1 0.997612 0.813073 0.315547\n369 1 0.502164 0.376419 0.245039\n337 1 0.501905 0.250293 0.249251\n283 1 0.821651 0.998091 0.316958\n281 1 0.761533 0.997049 0.248765\n151 1 0.691816 0.0051821 0.193925\n1271 1 0.692619 0.875072 0.189874\n1275 1 0.812192 0.879381 0.184461\n1276 1 0.747869 0.943877 0.190787\n1398 1 0.678858 0.939978 0.250921\n1399 1 0.693628 0.876915 0.308361\n1401 1 0.746459 0.884919 0.24779\n1402 1 0.821903 0.942333 0.250886\n1403 1 0.8155 0.873418 0.306844\n1404 1 0.757067 0.938359 0.310353\n279 1 0.695169 0.997064 0.312819\n180 1 0.50068 0.186067 0.195612\n1204 1 0.506375 0.68241 0.189067\n1279 1 0.936272 0.875546 0.18211\n1280 1 0.870772 0.941208 0.185156\n1405 1 0.878088 0.879088 0.245155\n1406 1 0.936501 0.934935 0.251951\n1407 1 0.933142 0.87724 0.308966\n1408 1 0.871977 0.934413 0.320082\n1529 1 0.755164 0.878022 0.37015\n403 1 0.563934 -0.00147318 0.438432\n402 1 0.56557 0.0631387 0.377908\n408 1 0.631185 0.0666954 0.439227\n435 1 0.560039 0.130153 0.439721\n437 1 0.619449 0.12401 0.370547\n405 1 0.62361 0.00258448 0.366568\n420 1 1.0006 0.188523 0.440329\n406 1 0.686935 0.0667628 0.379802\n410 1 0.817882 0.0660507 0.384344\n412 1 0.755219 0.0630135 0.438527\n439 1 0.689116 0.13181 0.432785\n441 1 0.75267 0.128648 0.374246\n443 1 0.814953 0.122553 0.447375\n1532 1 0.755893 0.951467 0.43696\n1561 1 0.753462 0.501769 0.495677\n414 1 0.936914 0.0618531 0.385325\n416 1 0.875155 0.0633061 0.439216\n445 1 0.873332 0.12965 0.382321\n447 1 0.937134 0.128593 0.434416\n597 1 0.628514 0.251111 0.49811\n1527 1 0.695249 0.883045 0.437503\n1345 1 0.991752 0.753637 0.25144\n467 1 0.562773 0.253219 0.435898\n434 1 0.560642 0.195002 0.37835\n469 1 0.622874 0.250042 0.379796\n472 1 0.62859 0.312167 0.437169\n466 1 0.558852 0.307656 0.371578\n440 1 0.628325 0.182979 0.435138\n569 1 0.750487 0.130131 0.499183\n1300 1 0.500669 0.559454 0.306838\n471 1 0.695334 0.254153 0.426677\n444 1 0.750098 0.189023 0.430644\n474 1 0.812284 0.312859 0.376303\n442 1 0.812314 0.186096 0.370318\n473 1 0.751581 0.257224 0.370798\n476 1 0.754551 0.322331 0.436277\n438 1 0.684047 0.187057 0.371974\n470 1 0.69168 0.321567 0.372003\n475 1 0.806459 0.247933 0.431306\n1217 1 1.0013 0.747692 0.121284\n1653 1 0.637469 0.878141 0.500739\n1530 1 0.817159 0.936241 0.378073\n1526 1 0.683188 0.938528 0.375696\n1460 1 0.500646 0.686921 0.444153\n1525 1 0.62642 0.874088 0.381726\n594 1 0.567038 0.31663 0.494032\n480 1 0.875359 0.304632 0.442115\n478 1 0.932386 0.314368 0.373801\n477 1 0.875142 0.24853 0.37133\n479 1 0.933995 0.243304 0.438313\n446 1 0.934892 0.187034 0.380366\n448 1 0.872462 0.185946 0.443466\n29 1 0.871998 0.998532 0.0017033\n1522 1 0.560778 0.939556 0.370491\n504 1 0.630263 0.436106 0.431834\n498 1 0.560438 0.439064 0.372196\n501 1 0.627952 0.371666 0.370331\n499 1 0.562502 0.384678 0.434373\n68 1 0.997271 0.310988 0.0565089\n1156 1 0.99425 0.564543 0.185879\n356 1 0.997174 0.43879 0.307571\n1523 1 0.559661 0.884132 0.43509\n1650 1 0.55997 0.930457 0.497021\n502 1 0.691823 0.43736 0.375722\n506 1 0.816028 0.435392 0.376541\n505 1 0.755314 0.378451 0.369933\n503 1 0.695635 0.376412 0.438173\n508 1 0.748162 0.434223 0.431893\n507 1 0.816746 0.375948 0.436102\n1431 1 0.692462 0.504115 0.433061\n1433 1 0.755932 0.501703 0.370053\n1092 1 1 0.815333 0.0627421\n512 1 0.874823 0.438321 0.433819\n511 1 0.932123 0.372798 0.432118\n510 1 0.938622 0.43911 0.375534\n509 1 0.872735 0.375089 0.373121\n1439 1 0.937341 0.497198 0.437601\n1409 1 0.997047 0.501987 0.373321\n1470 1 0.941189 0.683663 0.380853\n1429 1 0.626796 0.499472 0.378523\n1461 1 0.620751 0.620064 0.369016\n1426 1 0.557128 0.556192 0.372117\n1432 1 0.624185 0.560031 0.433499\n1459 1 0.56215 0.624021 0.442178\n1427 1 0.55865 0.497658 0.434248\n1501 1 0.877964 0.743732 0.382746\n1504 1 0.871305 0.801898 0.44496\n1435 1 0.821832 0.503892 0.437302\n1430 1 0.689798 0.564402 0.370803\n1463 1 0.685878 0.625125 0.435033\n1436 1 0.751306 0.562948 0.432784\n1434 1 0.818731 0.56313 0.375379\n1465 1 0.75698 0.621566 0.373582\n1467 1 0.814375 0.62625 0.443659\n1503 1 0.932095 0.739723 0.441745\n1437 1 0.880035 0.499453 0.377479\n1440 1 0.878337 0.56605 0.441919\n1469 1 0.88743 0.620392 0.381141\n1471 1 0.93955 0.617256 0.441738\n1438 1 0.944016 0.561677 0.380803\n1626 1 0.808341 0.804429 0.495426\n177 1 0.503759 0.13189 0.132604\n411 1 0.819386 0.998708 0.44039\n1458 1 0.554138 0.676665 0.374637\n1490 1 0.564346 0.814778 0.374335\n1496 1 0.622818 0.810445 0.439118\n1491 1 0.561619 0.747867 0.437116\n1493 1 0.62001 0.756041 0.369445\n1464 1 0.627064 0.683875 0.432015\n1494 1 0.684663 0.806685 0.378815\n1502 1 0.931836 0.805533 0.37337\n1472 1 0.876101 0.679363 0.438305\n1528 1 0.621669 0.935497 0.442366\n1495 1 0.684944 0.738116 0.438519\n1498 1 0.813862 0.814629 0.376972\n1462 1 0.689243 0.678755 0.37101\n1468 1 0.748955 0.685393 0.44263\n1500 1 0.751261 0.810172 0.43258\n1497 1 0.748322 0.741341 0.376848\n1466 1 0.819746 0.679043 0.371652\n1499 1 0.812895 0.737261 0.438532\n484 1 0.991057 0.439154 0.441714\n1297 1 0.500919 0.495322 0.25045\n292 1 0.991114 0.183902 0.321386\n1117 1 0.875449 0.755366 0.00234754\n1622 1 0.688931 0.814043 0.496022\n638 1 0.933993 0.43165 0.498995\n1585 1 0.502135 0.620818 0.496375\n1557 1 0.625039 0.498023 0.49224\n1621 1 0.626655 0.744917 0.496511\n529 1 0.506395 0.993105 0.493265\n113 1 0.498406 0.377871 0.00611899\n626 1 0.561591 0.435389 0.495432\n533 1 0.627424 0.00511218 0.497481\n534 1 0.693142 0.0717999 0.495337\n1630 1 0.930074 0.807155 0.498983\n1110 1 0.698215 0.813072 0.00058474\n30 1 0.934165 0.0661533 0.0030658\n1073 1 0.505135 0.612804 0.00187212\n542 1 0.9401 0.0571921 0.495933\n573 1 0.881147 0.123118 0.498567\n26 1 0.816237 0.0595393 0.0048998\n25 1 0.747324 0.00271601 0.00154262\n1661 1 0.86023 0.870311 0.497183\n1141 1 0.625925 0.876984 0.0060009\n536 1 0.625936 0.0665007 0.559719\n563 1 0.561363 0.131202 0.561483\n658 1 0.5655 0.0632903 0.622549\n693 1 0.623986 0.1254 0.621285\n661 1 0.631129 0.993962 0.620751\n1652 1 0.503811 0.936598 0.5686\n927 1 0.93697 0.010106 0.9418\n2045 1 0.881599 0.879269 0.873197\n1597 1 0.870248 0.618111 0.504505\n540 1 0.751828 0.0635681 0.562761\n567 1 0.68968 0.136899 0.561133\n571 1 0.81449 0.129093 0.554005\n662 1 0.688228 0.07153 0.624238\n666 1 0.807144 0.0614012 0.621988\n697 1 0.750795 0.130761 0.614456\n535 1 0.683349 0.00977208 0.555591\n1937 1 0.505995 0.499087 0.870334\n21 1 0.624015 0.0111931 0.998211\n740 1 0.998751 0.434461 0.691248\n544 1 0.877108 0.0690291 0.562355\n575 1 0.942126 0.129901 0.555099\n670 1 0.940854 0.0598247 0.620014\n701 1 0.874201 0.126024 0.624188\n669 1 0.875487 0.00880054 0.621415\n1590 1 0.68914 0.679517 0.500556\n568 1 0.620156 0.188117 0.565352\n595 1 0.568464 0.255061 0.556291\n600 1 0.627756 0.316894 0.557746\n690 1 0.564057 0.193823 0.626957\n722 1 0.553967 0.311176 0.624202\n725 1 0.631112 0.249125 0.621056\n1953 1 0.994028 0.624673 0.872984\n1566 1 0.949599 0.557326 0.503925\n1625 1 0.742712 0.750248 0.50457\n1562 1 0.809516 0.557481 0.501654\n548 1 0.99922 0.188925 0.558225\n572 1 0.755965 0.195792 0.55402\n599 1 0.689424 0.248384 0.559552\n603 1 0.816383 0.252218 0.558494\n604 1 0.753254 0.309808 0.561115\n694 1 0.690993 0.193555 0.625549\n698 1 0.814358 0.18573 0.615992\n726 1 0.689068 0.309161 0.622094\n729 1 0.754975 0.250138 0.61548\n730 1 0.815948 0.309985 0.627632\n1589 1 0.62842 0.617404 0.50155\n576 1 0.881852 0.186545 0.561722\n607 1 0.945641 0.248484 0.556964\n608 1 0.889148 0.314412 0.565135\n702 1 0.940253 0.182949 0.61343\n733 1 0.879331 0.2537 0.622355\n734 1 0.945246 0.312419 0.628755\n945 1 0.499579 0.129707 0.877784\n596 1 0.497797 0.31268 0.562914\n593 1 0.50044 0.253193 0.502763\n1555 1 0.564666 0.507585 0.56214\n627 1 0.564275 0.373318 0.562022\n632 1 0.625174 0.430721 0.55782\n754 1 0.560621 0.442758 0.623824\n757 1 0.627963 0.37357 0.622968\n2020 1 0.995389 0.940582 0.936305\n1658 1 0.808401 0.938281 0.506601\n1924 1 0.998006 0.563148 0.940104\n631 1 0.690289 0.369504 0.549885\n635 1 0.818341 0.375928 0.574217\n636 1 0.74662 0.433504 0.559092\n758 1 0.678591 0.44188 0.624814\n761 1 0.743436 0.377362 0.623271\n762 1 0.809009 0.444173 0.623304\n1563 1 0.819012 0.503155 0.565724\n1689 1 0.753227 0.504163 0.626264\n1559 1 0.687282 0.501295 0.561515\n977 1 0.50425 0.246591 0.875752\n639 1 0.945277 0.377774 0.561195\n640 1 0.878796 0.437579 0.56649\n765 1 0.876066 0.369869 0.638179\n766 1 0.935584 0.431071 0.629022\n1567 1 0.933928 0.495918 0.557292\n1693 1 0.87972 0.50299 0.626964\n1713 1 0.508305 0.61998 0.631209\n121 1 0.75365 0.371292 0.998445\n1685 1 0.625628 0.505327 0.62956\n1560 1 0.628687 0.561181 0.561449\n1587 1 0.561405 0.620866 0.559235\n1682 1 0.566953 0.567409 0.630367\n1717 1 0.620385 0.632704 0.639691\n2046 1 0.938055 0.946312 0.881474\n1716 1 0.504081 0.690316 0.692095\n1564 1 0.754286 0.559158 0.562178\n1591 1 0.689432 0.617791 0.56827\n1595 1 0.810937 0.619302 0.567219\n1686 1 0.688899 0.565965 0.629357\n1690 1 0.819585 0.570386 0.631319\n1721 1 0.748262 0.628764 0.628165\n1876 1 0.499692 0.811771 0.805783\n89 1 0.75086 0.24881 0.991812\n1780 1 0.500945 0.933381 0.684805\n1985 1 0.992301 0.743776 0.880159\n1568 1 0.876737 0.565559 0.57043\n1599 1 0.939195 0.619859 0.56377\n1694 1 0.941819 0.558205 0.632022\n1725 1 0.88014 0.625559 0.632252\n753 1 0.509237 0.376557 0.619454\n916 1 0.504249 0.0712712 0.937216\n913 1 0.504602 0.0054716 0.872285\n1592 1 0.626548 0.686409 0.560883\n1619 1 0.553539 0.755075 0.559603\n1624 1 0.622085 0.814803 0.56637\n1714 1 0.564185 0.69591 0.626227\n1746 1 0.56282 0.807878 0.623986\n1749 1 0.628535 0.747332 0.63216\n2047 1 0.938769 0.872503 0.940177\n1137 1 0.509696 0.88142 1.00021\n562 1 0.56596 0.191506 0.500419\n1596 1 0.745201 0.683648 0.56377\n1623 1 0.684943 0.748695 0.56193\n1627 1 0.805496 0.752971 0.561531\n1628 1 0.750711 0.812016 0.562939\n1718 1 0.689558 0.692668 0.624208\n1722 1 0.814592 0.690501 0.617633\n1750 1 0.693739 0.810476 0.626418\n1753 1 0.75203 0.754318 0.621587\n1754 1 0.808673 0.816243 0.622396\n65 1 0.995871 0.253523 0.997836\n1600 1 0.880772 0.683651 0.568463\n1631 1 0.93544 0.751606 0.562879\n1632 1 0.871312 0.814846 0.557771\n1726 1 0.934618 0.688676 0.628769\n1757 1 0.873145 0.755847 0.622356\n1758 1 0.936401 0.81593 0.616956\n1629 1 0.870633 0.738304 0.500065\n531 1 0.562984 0.00354613 0.55515\n1651 1 0.563428 0.875236 0.556896\n1656 1 0.621731 0.936207 0.559365\n1778 1 0.563519 0.937632 0.627172\n1781 1 0.623102 0.874411 0.623452\n62 1 0.927724 0.191202 0.998293\n1777 1 0.501732 0.870141 0.621568\n539 1 0.811777 -6.838e-05 0.568121\n1655 1 0.68798 0.870309 0.567674\n1659 1 0.807495 0.881242 0.56946\n1660 1 0.740551 0.952089 0.565365\n1782 1 0.678988 0.933564 0.623863\n1785 1 0.745742 0.881492 0.630429\n1786 1 0.817153 0.940653 0.624509\n665 1 0.740562 0.00621942 0.629171\n94 1 0.934067 0.312428 0.998831\n543 1 0.941352 0.997269 0.560527\n1663 1 0.92657 0.878163 0.551752\n1664 1 0.877368 0.936113 0.561235\n1789 1 0.869443 0.878925 0.626859\n1790 1 0.930409 0.934605 0.624598\n61 1 0.866233 0.126404 0.994179\n2004 1 0.509895 0.80944 0.926427\n1684 1 0.500424 0.557604 0.68746\n1844 1 0.507196 0.691418 0.804608\n664 1 0.623048 0.0739922 0.690033\n691 1 0.557817 0.129527 0.684036\n786 1 0.565727 0.0686743 0.746483\n792 1 0.630616 0.0612294 0.812678\n819 1 0.567811 0.12914 0.812355\n821 1 0.622453 0.128868 0.752212\n787 1 0.561484 0.99123 0.80905\n789 1 0.620942 0.00444563 0.746269\n2048 1 0.873987 0.945581 0.93525\n668 1 0.748761 0.0656692 0.684849\n695 1 0.686518 0.127317 0.684963\n699 1 0.803096 0.131419 0.682399\n790 1 0.692993 0.0716656 0.7509\n794 1 0.802803 0.0646308 0.743945\n796 1 0.744778 0.0589769 0.8111\n823 1 0.686242 0.130553 0.808182\n825 1 0.74554 0.131507 0.753634\n827 1 0.809459 0.123933 0.805822\n791 1 0.677532 0.995988 0.809531\n795 1 0.811444 0.995163 0.807976\n1812 1 0.500506 0.562299 0.816273\n2036 1 0.502681 0.942607 0.931813\n1598 1 0.934131 0.685822 0.499963\n799 1 0.937066 0.00370873 0.816175\n672 1 0.880908 0.0695675 0.684007\n703 1 0.93225 0.132629 0.685817\n798 1 0.935024 0.068444 0.747799\n800 1 0.872831 0.0588407 0.814172\n829 1 0.880938 0.128897 0.748099\n831 1 0.940024 0.126245 0.818468\n671 1 0.935269 0.00203386 0.674556\n797 1 0.870898 0.00646758 0.747284\n1085 1 0.872692 0.622535 0.997587\n1594 1 0.806226 0.686263 0.499236\n696 1 0.62024 0.191045 0.68784\n723 1 0.551457 0.255148 0.685576\n728 1 0.618919 0.308834 0.680625\n818 1 0.553534 0.189132 0.740041\n824 1 0.625955 0.188677 0.811387\n850 1 0.565336 0.30762 0.745328\n851 1 0.558449 0.255676 0.810665\n853 1 0.621047 0.244914 0.753631\n856 1 0.622245 0.311033 0.811209\n817 1 0.499236 0.126212 0.750469\n700 1 0.750938 0.191715 0.684003\n727 1 0.690994 0.255558 0.68569\n731 1 0.809224 0.251703 0.684835\n732 1 0.75161 0.309162 0.690231\n822 1 0.682221 0.189088 0.739167\n826 1 0.808529 0.190973 0.747885\n828 1 0.74703 0.193526 0.813361\n854 1 0.675714 0.312809 0.751662\n855 1 0.685931 0.251469 0.805933\n857 1 0.750379 0.251501 0.749887\n858 1 0.818343 0.30655 0.747636\n859 1 0.80384 0.25214 0.813585\n860 1 0.745607 0.312886 0.808423\n1121 1 0.999448 0.872719 0.999959\n704 1 0.866684 0.196243 0.68541\n735 1 0.949033 0.24274 0.682474\n736 1 0.879707 0.303799 0.691045\n830 1 0.946094 0.182402 0.745653\n832 1 0.875952 0.183677 0.808483\n861 1 0.879988 0.253557 0.757161\n862 1 0.949165 0.308498 0.742198\n863 1 0.9461 0.242346 0.810534\n864 1 0.880082 0.31876 0.810652\n1586 1 0.564428 0.692864 0.499711\n1813 1 0.623531 0.491548 0.740469\n755 1 0.563366 0.37914 0.685055\n760 1 0.616006 0.438757 0.68162\n882 1 0.563138 0.435566 0.743478\n883 1 0.562365 0.366699 0.800843\n885 1 0.621957 0.377271 0.743707\n888 1 0.6193 0.426629 0.808105\n1811 1 0.568652 0.495229 0.80571\n1086 1 0.937677 0.681608 0.996795\n1815 1 0.68285 0.49526 0.809447\n759 1 0.683331 0.364617 0.68469\n763 1 0.81129 0.379237 0.689014\n764 1 0.753316 0.440682 0.691235\n886 1 0.685375 0.430304 0.746431\n887 1 0.69184 0.370519 0.814792\n889 1 0.760938 0.376456 0.753789\n890 1 0.820829 0.439921 0.75744\n891 1 0.81017 0.377801 0.816205\n892 1 0.738277 0.435793 0.809585\n1817 1 0.752243 0.49522 0.757265\n1695 1 0.939482 0.496639 0.693954\n767 1 0.938618 0.378943 0.696694\n768 1 0.873081 0.441786 0.691003\n893 1 0.882151 0.375648 0.749435\n894 1 0.941793 0.438818 0.747803\n895 1 0.948622 0.374486 0.813194\n896 1 0.880644 0.43344 0.817144\n1821 1 0.878123 0.499601 0.756013\n1683 1 0.563507 0.506105 0.686218\n1688 1 0.630593 0.562616 0.70312\n1715 1 0.564581 0.627326 0.696706\n1810 1 0.560163 0.555459 0.745753\n1816 1 0.619697 0.561999 0.804132\n1843 1 0.55753 0.623958 0.809026\n1845 1 0.630256 0.63035 0.751687\n1691 1 0.809595 0.498152 0.69352\n1819 1 0.807315 0.498697 0.814573\n1687 1 0.690385 0.507818 0.684893\n1692 1 0.758607 0.570545 0.698997\n1719 1 0.694912 0.626776 0.691026\n1723 1 0.817159 0.638734 0.683151\n1814 1 0.694818 0.564877 0.758726\n1818 1 0.821432 0.563036 0.750408\n1820 1 0.758617 0.564612 0.811511\n1847 1 0.691795 0.637172 0.807649\n1849 1 0.75727 0.637065 0.745232\n1851 1 0.819548 0.625851 0.802379\n1593 1 0.75074 0.622688 0.505906\n1823 1 0.942217 0.503052 0.816604\n1696 1 0.87828 0.560902 0.695143\n1727 1 0.938441 0.62456 0.688595\n1822 1 0.941685 0.560066 0.757018\n1824 1 0.885004 0.565025 0.815946\n1853 1 0.878737 0.629258 0.746458\n1855 1 0.940538 0.634618 0.811192\n2038 1 0.691176 0.945431 0.876576\n1720 1 0.630691 0.692365 0.694102\n1747 1 0.559641 0.755724 0.689987\n1752 1 0.627743 0.815384 0.680523\n1842 1 0.572955 0.691167 0.749456\n1848 1 0.634329 0.694291 0.8132\n1874 1 0.565302 0.809723 0.75286\n1875 1 0.565893 0.751222 0.816604\n1877 1 0.632321 0.758056 0.745391\n1880 1 0.63515 0.81552 0.817292\n1724 1 0.756765 0.689891 0.685044\n1751 1 0.694994 0.757292 0.689586\n1755 1 0.811903 0.753489 0.68285\n1756 1 0.761466 0.819847 0.690531\n1846 1 0.696693 0.692217 0.744848\n1850 1 0.819111 0.69091 0.750607\n1852 1 0.761018 0.688455 0.81299\n1878 1 0.692902 0.820008 0.750554\n1879 1 0.701328 0.75188 0.804715\n1881 1 0.759554 0.753203 0.745519\n1882 1 0.813721 0.818022 0.761864\n1883 1 0.813422 0.756026 0.813056\n1884 1 0.750824 0.816752 0.812824\n1700 1 0.995647 0.685185 0.692998\n897 1 0.999772 0.00367949 0.876676\n1601 1 0.994218 0.750757 0.501117\n1728 1 0.875409 0.694653 0.68997\n1759 1 0.939675 0.755036 0.684858\n1760 1 0.87766 0.813067 0.683927\n1854 1 0.942142 0.68791 0.747579\n1856 1 0.882464 0.68677 0.807212\n1885 1 0.877755 0.753936 0.747729\n1886 1 0.940235 0.810966 0.745349\n1887 1 0.936441 0.749593 0.808897\n1888 1 0.881978 0.810117 0.806326\n1109 1 0.631789 0.74921 0.998875\n659 1 0.572501 0.00643844 0.68271\n1779 1 0.559831 0.874671 0.687442\n1784 1 0.620805 0.93576 0.683412\n1906 1 0.555551 0.932522 0.752948\n1907 1 0.558055 0.868745 0.805806\n1909 1 0.622499 0.868106 0.745876\n1912 1 0.620677 0.931861 0.809531\n2042 1 0.812343 0.938745 0.875706\n667 1 0.807994 0.00381242 0.682724\n793 1 0.741849 0.00456894 0.740402\n663 1 0.677919 0.00695365 0.685281\n1783 1 0.688177 0.879891 0.685208\n1787 1 0.823165 0.872634 0.688857\n1788 1 0.758118 0.939405 0.690962\n1910 1 0.674875 0.943914 0.74049\n1911 1 0.689143 0.875026 0.810934\n1913 1 0.753891 0.876911 0.749386\n1914 1 0.814115 0.941384 0.746563\n1915 1 0.816419 0.885454 0.813007\n1916 1 0.74673 0.940738 0.809293\n1809 1 0.501211 0.49279 0.750276\n1791 1 0.939572 0.884687 0.687313\n1792 1 0.878108 0.936049 0.69223\n1917 1 0.882182 0.873694 0.751971\n1918 1 0.938314 0.939776 0.757708\n1919 1 0.938922 0.879155 0.819297\n1920 1 0.878147 0.937284 0.813288\n914 1 0.562167 0.0666896 0.871346\n920 1 0.618664 0.0712888 0.932464\n947 1 0.556803 0.13227 0.942697\n949 1 0.634923 0.121109 0.870595\n917 1 0.622221 0.00321902 0.875685\n1074 1 0.558299 0.682783 0.994567\n923 1 0.809796 0.998425 0.94283\n918 1 0.686934 0.0562751 0.86703\n922 1 0.81121 0.062483 0.871778\n924 1 0.751308 0.0564471 0.9341\n951 1 0.692691 0.122163 0.925005\n953 1 0.746012 0.130592 0.863586\n955 1 0.798235 0.129561 0.933273\n921 1 0.753485 0.000726633 0.868118\n1972 1 0.501286 0.692191 0.934071\n1050 1 0.806828 0.555231 0.998861\n925 1 0.869938 0.998208 0.875897\n2041 1 0.747217 0.881776 0.868877\n948 1 0.502392 0.192614 0.94237\n926 1 0.934732 0.0648407 0.875538\n928 1 0.869324 0.0659902 0.936809\n957 1 0.871025 0.121209 0.878058\n959 1 0.933723 0.122616 0.938679\n628 1 0.509017 0.439107 0.561643\n979 1 0.56123 0.251313 0.938979\n981 1 0.622842 0.250549 0.872165\n978 1 0.56689 0.312202 0.867782\n984 1 0.628514 0.308819 0.939704\n946 1 0.564994 0.186041 0.87223\n952 1 0.621444 0.182822 0.934388\n1078 1 0.688193 0.681105 0.997502\n1654 1 0.691169 0.937052 0.500238\n2035 1 0.575172 0.880727 0.93494\n2040 1 0.63309 0.937262 0.93426\n987 1 0.814627 0.242988 0.921983\n956 1 0.743316 0.186909 0.934964\n988 1 0.744956 0.308652 0.934837\n986 1 0.804282 0.316837 0.870865\n982 1 0.68733 0.313032 0.879455\n954 1 0.814051 0.181396 0.865797\n983 1 0.676002 0.242639 0.932333\n985 1 0.742583 0.251022 0.871815\n950 1 0.675331 0.191743 0.871937\n1636 1 0.996561 0.934446 0.556533\n1988 1 0.996107 0.805665 0.929493\n57 1 0.748366 0.128384 0.997663\n990 1 0.942123 0.315923 0.871958\n960 1 0.875751 0.185787 0.931563\n989 1 0.871802 0.256161 0.859737\n991 1 0.935184 0.249045 0.94182\n992 1 0.880054 0.308983 0.931404\n958 1 0.937032 0.195815 0.880616\n2044 1 0.745582 0.935829 0.941205\n1149 1 0.876052 0.888386 0.999134\n1939 1 0.560784 0.493763 0.937832\n1013 1 0.623847 0.375099 0.870339\n1011 1 0.561581 0.373328 0.934128\n1016 1 0.618556 0.43451 0.935036\n1010 1 0.554433 0.436447 0.876406\n1941 1 0.616089 0.490469 0.873301\n2034 1 0.570852 0.93938 0.881022\n1082 1 0.814822 0.691645 0.995144\n689 1 0.501355 0.134355 0.620294\n1761 1 0.995815 0.872918 0.620911\n1565 1 0.875679 0.502914 0.503074\n1019 1 0.817533 0.38121 0.933781\n1017 1 0.751589 0.383177 0.877411\n1015 1 0.696334 0.379604 0.936116\n1018 1 0.813752 0.441531 0.874011\n1014 1 0.681391 0.431371 0.873453\n1020 1 0.752575 0.436779 0.939749\n1943 1 0.683762 0.494448 0.930308\n125 1 0.877707 0.375843 0.99872\n1024 1 0.875355 0.438425 0.940037\n1023 1 0.945297 0.370534 0.935226\n1022 1 0.935465 0.438566 0.876608\n1021 1 0.874953 0.375541 0.873364\n1949 1 0.87371 0.504554 0.870922\n2043 1 0.808663 0.882729 0.932679\n2039 1 0.679766 0.878115 0.947569\n2033 1 0.506457 0.874273 0.876402\n1938 1 0.572084 0.569949 0.868683\n1944 1 0.624154 0.562179 0.938274\n1971 1 0.56257 0.623883 0.93143\n1973 1 0.629614 0.631015 0.87609\n2014 1 0.935837 0.816978 0.870826\n1114 1 0.809649 0.817959 0.997741\n90 1 0.817236 0.312171 1.00048\n1947 1 0.813976 0.495971 0.944112\n1945 1 0.750642 0.502348 0.874378\n1942 1 0.679359 0.553231 0.870623\n1946 1 0.816763 0.563298 0.872754\n1977 1 0.756285 0.634297 0.874115\n1979 1 0.810241 0.62178 0.939942\n1948 1 0.739719 0.566405 0.931254\n1975 1 0.682498 0.628592 0.935274\n1982 1 0.929933 0.688096 0.880593\n1951 1 0.938604 0.501517 0.937251\n1950 1 0.942223 0.563311 0.870735\n1981 1 0.878263 0.624553 0.879847\n1983 1 0.941066 0.624223 0.936922\n1952 1 0.875369 0.56067 0.931353\n2013 1 0.874431 0.751431 0.869894\n919 1 0.681996 0.00416497 0.942885\n2037 1 0.636966 0.882277 0.872345\n2008 1 0.629785 0.814399 0.943419\n2003 1 0.570059 0.754984 0.944915\n1976 1 0.622549 0.685609 0.937418\n1970 1 0.569113 0.686048 0.870432\n2002 1 0.577231 0.82242 0.871059\n2005 1 0.630106 0.758136 0.873701\n2016 1 0.877541 0.812205 0.939379\n1665 1 0.997696 0.489668 0.620596\n1889 1 0.997608 0.873329 0.763131\n1980 1 0.752177 0.683021 0.937607\n2010 1 0.812168 0.824387 0.869362\n2009 1 0.746434 0.752801 0.87945\n1974 1 0.689272 0.694959 0.876956\n2006 1 0.691746 0.823416 0.88027\n1978 1 0.812494 0.69588 0.877063\n2011 1 0.81283 0.762778 0.929035\n2012 1 0.75145 0.821691 0.93504\n2007 1 0.690822 0.756628 0.939885\n915 1 0.559275 0.0145139 0.937295\n2001 1 0.503529 0.748724 0.869792\n634 1 0.813436 0.441314 0.503532\n58 1 0.803604 0.185957 0.998859\n2015 1 0.932364 0.751396 0.937982\n1984 1 0.875103 0.692821 0.934914\n1969 1 0.503095 0.633475 0.878087\n93 1 0.870028 0.250818 0.998487\n644 1 0.989593 0.0679489 0.689742\n1681 1 0.498189 0.500717 0.624701\n1046 1 0.681651 0.557939 0.997549\n1729 1 0.995612 0.754336 0.620498\n1113 1 0.750883 0.748866 0.993734\n1732 1 0.999364 0.814426 0.683077\n1081 1 0.744571 0.616237 0.995682\n932 1 0.99714 0.186906 0.932287\n657 1 0.509888 0.997074 0.631166\n785 1 0.500594 0.0223017 0.752366\n660 1 0.502346 0.0771608 0.683696\n769 1 0.997971 0.00187752 0.753016\n54 1 0.678292 0.178654 0.995518\n1873 1 0.505505 0.754171 0.743856\n1956 1 0.996502 0.689345 0.940894\n1105 1 0.499344 0.753744 0.997299\n22 1 0.686978 0.0739522 0.995187\n1620 1 0.505144 0.819035 0.553823\n1045 1 0.627784 0.49134 0.993472\n1748 1 0.501876 0.813128 0.685371\n126 1 0.937662 0.444283 0.99765\n1604 1 0.99702 0.816837 0.560594\n673 1 0.999641 0.122918 0.623432\n1697 1 0.996739 0.631902 0.623836\n1556 1 0.504309 0.561917 0.565136\n1668 1 0.996923 0.5616 0.695217\n1077 1 0.623689 0.62425 0.995406\n637 1 0.877381 0.370271 0.50822\n1053 1 0.875761 0.511196 0.995611\n117 1 0.622404 0.372799 0.999102\n85 1 0.619939 0.245189 0.997428\n1054 1 0.933612 0.571044 0.996108\n86 1 0.689086 0.320973 0.995047\n1146 1 0.804815 0.933162 0.994001\n537 1 0.75554 0.00886688 0.501094\n1150 1 0.941777 0.941938 0.99672\n1118 1 0.93893 0.81264 0.999883\n1042 1 0.568849 0.550491 0.997138\n1049 1 0.740298 0.502532 0.999055\n541 1 0.877194 0.99981 0.507761\n1138 1 0.566688 0.943352 0.990632\n605 1 0.88383 0.248854 0.501792\n1089 1 0.996475 0.754257 0.994671\n633 1 0.762716 0.378367 0.502073\n565 1 0.625778 0.127064 0.504723\n606 1 0.942516 0.309551 0.502453\n33 1 0.991285 0.136045 0.996338\n538 1 0.81848 0.0606303 0.506133\n1554 1 0.562857 0.563002 0.501278\n602 1 0.816524 0.312591 0.506647\n"
] | true |
99,663 |
de4dc590ee8c369fc1e5ab3b573e5a6641831996
|
N, C = map(int, input().split())
channels = [[] for _ in range(C)]
maxT = 0
for _ in range(N) :
s, t, c = map(int, input().split())
maxT = max(maxT, t)
channels[c - 1].append((s, t))
maxT = maxT * 2 + 10
tvSum = [0 for _ in range(maxT)]
for tv in channels :
imos = [0 for _ in range(maxT)]
for (s, t) in tv :
imos[s * 2 - 1] += 1
imos[t * 2] -= 1
for i in range(1, maxT) :
imos[i] += imos[i - 1]
for i in range(maxT) :
if imos[i] > 0 :
tvSum[i] += 1
print(max(tvSum))
|
[
"N, C = map(int, input().split())\n\nchannels = [[] for _ in range(C)]\nmaxT = 0\n\nfor _ in range(N) :\n s, t, c = map(int, input().split())\n maxT = max(maxT, t)\n channels[c - 1].append((s, t))\n\nmaxT = maxT * 2 + 10\ntvSum = [0 for _ in range(maxT)]\n\nfor tv in channels :\n imos = [0 for _ in range(maxT)]\n for (s, t) in tv :\n imos[s * 2 - 1] += 1\n imos[t * 2] -= 1\n for i in range(1, maxT) :\n imos[i] += imos[i - 1]\n for i in range(maxT) :\n if imos[i] > 0 :\n tvSum[i] += 1\n\nprint(max(tvSum))\n",
"N, C = map(int, input().split())\nchannels = [[] for _ in range(C)]\nmaxT = 0\nfor _ in range(N):\n s, t, c = map(int, input().split())\n maxT = max(maxT, t)\n channels[c - 1].append((s, t))\nmaxT = maxT * 2 + 10\ntvSum = [(0) for _ in range(maxT)]\nfor tv in channels:\n imos = [(0) for _ in range(maxT)]\n for s, t in tv:\n imos[s * 2 - 1] += 1\n imos[t * 2] -= 1\n for i in range(1, maxT):\n imos[i] += imos[i - 1]\n for i in range(maxT):\n if imos[i] > 0:\n tvSum[i] += 1\nprint(max(tvSum))\n",
"<assignment token>\nfor _ in range(N):\n s, t, c = map(int, input().split())\n maxT = max(maxT, t)\n channels[c - 1].append((s, t))\n<assignment token>\nfor tv in channels:\n imos = [(0) for _ in range(maxT)]\n for s, t in tv:\n imos[s * 2 - 1] += 1\n imos[t * 2] -= 1\n for i in range(1, maxT):\n imos[i] += imos[i - 1]\n for i in range(maxT):\n if imos[i] > 0:\n tvSum[i] += 1\nprint(max(tvSum))\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,664 |
443df7d6d233a110631fa651a6a651fedd40ae75
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Movie)
admin.site.register(Theatre)
admin.site.register(Customer)
admin.site.register(Booking)
admin.site.register(Review)
admin.site.register(Category)
|
[
"from django.contrib import admin\nfrom .models import *\n\n# Register your models here.\nadmin.site.register(Movie)\nadmin.site.register(Theatre)\nadmin.site.register(Customer)\nadmin.site.register(Booking)\nadmin.site.register(Review)\nadmin.site.register(Category)\n\n",
"from django.contrib import admin\nfrom .models import *\nadmin.site.register(Movie)\nadmin.site.register(Theatre)\nadmin.site.register(Customer)\nadmin.site.register(Booking)\nadmin.site.register(Review)\nadmin.site.register(Category)\n",
"<import token>\nadmin.site.register(Movie)\nadmin.site.register(Theatre)\nadmin.site.register(Customer)\nadmin.site.register(Booking)\nadmin.site.register(Review)\nadmin.site.register(Category)\n",
"<import token>\n<code token>\n"
] | false |
99,665 |
035b43208676f3c41a7099bd80b46d86d4fd42c5
|
from django.urls import path
from . import views
urlpatterns = [
path('detail/<int:uid>',views.user_detail),
path('update/<int:uid>',views.user_update),
]
|
[
"from django.urls import path\nfrom . import views\nurlpatterns = [\n path('detail/<int:uid>',views.user_detail),\n path('update/<int:uid>',views.user_update),\n]",
"from django.urls import path\nfrom . import views\nurlpatterns = [path('detail/<int:uid>', views.user_detail), path(\n 'update/<int:uid>', views.user_update)]\n",
"<import token>\nurlpatterns = [path('detail/<int:uid>', views.user_detail), path(\n 'update/<int:uid>', views.user_update)]\n",
"<import token>\n<assignment token>\n"
] | false |
99,666 |
90f212b1e10069fa0bdcea267898d0640e617b8d
|
from flask import Flask, jsonify, render_template
import jsonrpclib, os, platform, ctypes
from Maraschino import app
from settings import *
from maraschino.noneditable import *
from maraschino.tools import *
from maraschino.models import Disk
@app.route('/xhr/diskspace')
@requires_auth
def xhr_diskspace():
disks = []
disks_db = Disk.query.order_by(Disk.position)
if disks_db.count() > 0:
for disk_db in disks_db:
disk = disk_usage(disk_db.path)
disk['path'] = disk_db.path
disk['id'] = disk_db.id
disks.append(disk)
return render_template('diskspace.html',
disks = disks,
)
@app.route('/xhr/add_disk_dialog')
@requires_auth
def add_disk_dialog():
return add_edit_disk_dialog()
@app.route('/xhr/edit_disk_dialog/<disk_id>')
@requires_auth
def edit_disk_dialog(disk_id):
return add_edit_disk_dialog(disk_id)
def add_edit_disk_dialog(disk_id=None):
disk = None
if disk_id:
try:
disk = Disk.query.filter(Disk.id == disk_id).first()
except:
pass
return render_template('add_edit_disk_dialog.html',
disk = disk,
)
@app.route('/xhr/add_edit_disk', methods=['POST'])
@requires_auth
def add_edit_disk():
path = request.form['path']
position = request.form['position']
if path == '':
return jsonify({ 'status': 'error' })
if position == '':
position = None
if 'disk_id' in request.form:
disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()
disk.path = path
disk.position = position
else:
disk = Disk(
path,
position,
)
try:
disk_usage(disk.path)
db_session.add(disk)
db_session.commit()
except:
return jsonify({ 'status': 'error' })
return xhr_diskspace()
@app.route('/xhr/delete_disk/<disk_id>', methods=['POST'])
@requires_auth
def delete_disk(disk_id):
try:
disk = Disk.query.filter(Disk.id == disk_id).first()
db_session.delete(disk)
db_session.commit()
except:
return jsonify({ 'status': 'error' })
return xhr_diskspace()
def disk_usage(path):
if platform.system() == 'Windows':
freeuser = ctypes.c_int64()
total = ctypes.c_int64()
free = ctypes.c_int64()
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path), ctypes.byref(freeuser), ctypes.byref(total), ctypes.byref(free))
used = (total.value - free.value) / (1024*1024*1024)
total = total.value / (1024*1024*1024)
free = free.value / (1024*1024*1024)
return {
'total': "%.2f" % total,
'used': "%.2f" % used,
'free': "%.2f" % free,
'percentage_used': int((float(used)/float(total))*100),
}
else:
st = os.statvfs(path)
free = float(st.f_bavail * st.f_frsize) / 1073741824
total = float(st.f_blocks * st.f_frsize) / 1073741824
used = float((st.f_blocks - st.f_bfree) * st.f_frsize) / 1073741824
return {
'total': "%.2f" % total,
'used': "%.2f" % used,
'free': "%.2f" % free,
'percentage_used': int(used/total * 100),
}
|
[
"from flask import Flask, jsonify, render_template\nimport jsonrpclib, os, platform, ctypes\n\nfrom Maraschino import app\nfrom settings import *\nfrom maraschino.noneditable import *\nfrom maraschino.tools import *\n\nfrom maraschino.models import Disk\n\[email protected]('/xhr/diskspace')\n@requires_auth\ndef xhr_diskspace():\n disks = []\n disks_db = Disk.query.order_by(Disk.position)\n\n if disks_db.count() > 0:\n for disk_db in disks_db:\n disk = disk_usage(disk_db.path)\n disk['path'] = disk_db.path\n disk['id'] = disk_db.id\n disks.append(disk)\n\n return render_template('diskspace.html',\n disks = disks,\n )\n\[email protected]('/xhr/add_disk_dialog')\n@requires_auth\ndef add_disk_dialog():\n return add_edit_disk_dialog()\n\[email protected]('/xhr/edit_disk_dialog/<disk_id>')\n@requires_auth\ndef edit_disk_dialog(disk_id):\n return add_edit_disk_dialog(disk_id)\n\ndef add_edit_disk_dialog(disk_id=None):\n disk = None\n\n if disk_id:\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n\n except:\n pass\n\n return render_template('add_edit_disk_dialog.html',\n disk = disk,\n )\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n\n if path == '':\n return jsonify({ 'status': 'error' })\n\n if position == '':\n position = None\n\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n\n else:\n disk = Disk(\n path,\n position,\n )\n\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n\n except:\n return jsonify({ 'status': 'error' })\n\n return xhr_diskspace()\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n\n except:\n return jsonify({ 'status': 'error' })\n\n return xhr_diskspace()\n\ndef disk_usage(path):\n if platform.system() == 'Windows':\n freeuser = ctypes.c_int64()\n total = ctypes.c_int64()\n free = ctypes.c_int64()\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path), ctypes.byref(freeuser), ctypes.byref(total), ctypes.byref(free))\n used = (total.value - free.value) / (1024*1024*1024)\n total = total.value / (1024*1024*1024)\n free = free.value / (1024*1024*1024)\n\n return {\n 'total': \"%.2f\" % total,\n 'used': \"%.2f\" % used,\n 'free': \"%.2f\" % free,\n 'percentage_used': int((float(used)/float(total))*100),\n }\n\n else:\n st = os.statvfs(path)\n\n free = float(st.f_bavail * st.f_frsize) / 1073741824\n total = float(st.f_blocks * st.f_frsize) / 1073741824\n used = float((st.f_blocks - st.f_bfree) * st.f_frsize) / 1073741824\n\n return {\n 'total': \"%.2f\" % total,\n 'used': \"%.2f\" % used,\n 'free': \"%.2f\" % free,\n 'percentage_used': int(used/total * 100),\n }\n",
"from flask import Flask, jsonify, render_template\nimport jsonrpclib, os, platform, ctypes\nfrom Maraschino import app\nfrom settings import *\nfrom maraschino.noneditable import *\nfrom maraschino.tools import *\nfrom maraschino.models import Disk\n\n\[email protected]('/xhr/diskspace')\n@requires_auth\ndef xhr_diskspace():\n disks = []\n disks_db = Disk.query.order_by(Disk.position)\n if disks_db.count() > 0:\n for disk_db in disks_db:\n disk = disk_usage(disk_db.path)\n disk['path'] = disk_db.path\n disk['id'] = disk_db.id\n disks.append(disk)\n return render_template('diskspace.html', disks=disks)\n\n\[email protected]('/xhr/add_disk_dialog')\n@requires_auth\ndef add_disk_dialog():\n return add_edit_disk_dialog()\n\n\[email protected]('/xhr/edit_disk_dialog/<disk_id>')\n@requires_auth\ndef edit_disk_dialog(disk_id):\n return add_edit_disk_dialog(disk_id)\n\n\ndef add_edit_disk_dialog(disk_id=None):\n disk = None\n if disk_id:\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n except:\n pass\n return render_template('add_edit_disk_dialog.html', disk=disk)\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\ndef disk_usage(path):\n if platform.system() == 'Windows':\n freeuser = ctypes.c_int64()\n total = ctypes.c_int64()\n free = ctypes.c_int64()\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path),\n ctypes.byref(freeuser), ctypes.byref(total), ctypes.byref(free))\n used = (total.value - free.value) / (1024 * 1024 * 1024)\n total = total.value / (1024 * 1024 * 1024)\n free = free.value / (1024 * 1024 * 1024)\n return {'total': '%.2f' % total, 'used': '%.2f' % used, 'free': \n '%.2f' % free, 'percentage_used': int(float(used) / float(total\n ) * 100)}\n else:\n st = os.statvfs(path)\n free = float(st.f_bavail * st.f_frsize) / 1073741824\n total = float(st.f_blocks * st.f_frsize) / 1073741824\n used = float((st.f_blocks - st.f_bfree) * st.f_frsize) / 1073741824\n return {'total': '%.2f' % total, 'used': '%.2f' % used, 'free': \n '%.2f' % free, 'percentage_used': int(used / total * 100)}\n",
"<import token>\n\n\[email protected]('/xhr/diskspace')\n@requires_auth\ndef xhr_diskspace():\n disks = []\n disks_db = Disk.query.order_by(Disk.position)\n if disks_db.count() > 0:\n for disk_db in disks_db:\n disk = disk_usage(disk_db.path)\n disk['path'] = disk_db.path\n disk['id'] = disk_db.id\n disks.append(disk)\n return render_template('diskspace.html', disks=disks)\n\n\[email protected]('/xhr/add_disk_dialog')\n@requires_auth\ndef add_disk_dialog():\n return add_edit_disk_dialog()\n\n\[email protected]('/xhr/edit_disk_dialog/<disk_id>')\n@requires_auth\ndef edit_disk_dialog(disk_id):\n return add_edit_disk_dialog(disk_id)\n\n\ndef add_edit_disk_dialog(disk_id=None):\n disk = None\n if disk_id:\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n except:\n pass\n return render_template('add_edit_disk_dialog.html', disk=disk)\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\ndef disk_usage(path):\n if platform.system() == 'Windows':\n freeuser = ctypes.c_int64()\n total = ctypes.c_int64()\n free = ctypes.c_int64()\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path),\n ctypes.byref(freeuser), ctypes.byref(total), ctypes.byref(free))\n used = (total.value - free.value) / (1024 * 1024 * 1024)\n total = total.value / (1024 * 1024 * 1024)\n free = free.value / (1024 * 1024 * 1024)\n return {'total': '%.2f' % total, 'used': '%.2f' % used, 'free': \n '%.2f' % free, 'percentage_used': int(float(used) / float(total\n ) * 100)}\n else:\n st = os.statvfs(path)\n free = float(st.f_bavail * st.f_frsize) / 1073741824\n total = float(st.f_blocks * st.f_frsize) / 1073741824\n used = float((st.f_blocks - st.f_bfree) * st.f_frsize) / 1073741824\n return {'total': '%.2f' % total, 'used': '%.2f' % used, 'free': \n '%.2f' % free, 'percentage_used': int(used / total * 100)}\n",
"<import token>\n\n\[email protected]('/xhr/diskspace')\n@requires_auth\ndef xhr_diskspace():\n disks = []\n disks_db = Disk.query.order_by(Disk.position)\n if disks_db.count() > 0:\n for disk_db in disks_db:\n disk = disk_usage(disk_db.path)\n disk['path'] = disk_db.path\n disk['id'] = disk_db.id\n disks.append(disk)\n return render_template('diskspace.html', disks=disks)\n\n\[email protected]('/xhr/add_disk_dialog')\n@requires_auth\ndef add_disk_dialog():\n return add_edit_disk_dialog()\n\n\[email protected]('/xhr/edit_disk_dialog/<disk_id>')\n@requires_auth\ndef edit_disk_dialog(disk_id):\n return add_edit_disk_dialog(disk_id)\n\n\ndef add_edit_disk_dialog(disk_id=None):\n disk = None\n if disk_id:\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n except:\n pass\n return render_template('add_edit_disk_dialog.html', disk=disk)\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\n<function token>\n",
"<import token>\n\n\[email protected]('/xhr/diskspace')\n@requires_auth\ndef xhr_diskspace():\n disks = []\n disks_db = Disk.query.order_by(Disk.position)\n if disks_db.count() > 0:\n for disk_db in disks_db:\n disk = disk_usage(disk_db.path)\n disk['path'] = disk_db.path\n disk['id'] = disk_db.id\n disks.append(disk)\n return render_template('diskspace.html', disks=disks)\n\n\n<function token>\n\n\[email protected]('/xhr/edit_disk_dialog/<disk_id>')\n@requires_auth\ndef edit_disk_dialog(disk_id):\n return add_edit_disk_dialog(disk_id)\n\n\ndef add_edit_disk_dialog(disk_id=None):\n disk = None\n if disk_id:\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n except:\n pass\n return render_template('add_edit_disk_dialog.html', disk=disk)\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\n<function token>\n",
"<import token>\n\n\[email protected]('/xhr/diskspace')\n@requires_auth\ndef xhr_diskspace():\n disks = []\n disks_db = Disk.query.order_by(Disk.position)\n if disks_db.count() > 0:\n for disk_db in disks_db:\n disk = disk_usage(disk_db.path)\n disk['path'] = disk_db.path\n disk['id'] = disk_db.id\n disks.append(disk)\n return render_template('diskspace.html', disks=disks)\n\n\n<function token>\n<function token>\n\n\ndef add_edit_disk_dialog(disk_id=None):\n disk = None\n if disk_id:\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n except:\n pass\n return render_template('add_edit_disk_dialog.html', disk=disk)\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef add_edit_disk_dialog(disk_id=None):\n disk = None\n if disk_id:\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n except:\n pass\n return render_template('add_edit_disk_dialog.html', disk=disk)\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\[email protected]('/xhr/delete_disk/<disk_id>', methods=['POST'])\n@requires_auth\ndef delete_disk(disk_id):\n try:\n disk = Disk.query.filter(Disk.id == disk_id).first()\n db_session.delete(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/xhr/add_edit_disk', methods=['POST'])\n@requires_auth\ndef add_edit_disk():\n path = request.form['path']\n position = request.form['position']\n if path == '':\n return jsonify({'status': 'error'})\n if position == '':\n position = None\n if 'disk_id' in request.form:\n disk = Disk.query.filter(Disk.id == request.form['disk_id']).first()\n disk.path = path\n disk.position = position\n else:\n disk = Disk(path, position)\n try:\n disk_usage(disk.path)\n db_session.add(disk)\n db_session.commit()\n except:\n return jsonify({'status': 'error'})\n return xhr_diskspace()\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,667 |
694e1475bfa8ed7e5c4eb4e97e51971e5424ed7b
|
#!/usr/bin/env python3
"""
Usage: setup-cluster-images image-archive [num_nodes [targetdir]]
image-archive - zip file as downloaded from raspberry-pi.org
num_nodes - number of nodes in the cluster [4]
node_prefix - prefix for the cluster nodes [gg]
targetdir - destination directory [current directory]
"""
import sys
from _sha256 import sha256
from contextlib import contextmanager
from logging import info, basicConfig, debug, DEBUG
from os import chdir, getcwd, makedirs, mkdir, geteuid, chmod, chown, stat, unlink, \
listdir, rename
from shutil import rmtree, copy2
from subprocess import check_output, check_call
from tempfile import mkdtemp
from urllib import request
from zipfile import ZipFile
from os.path import join, abspath, isdir, dirname, isfile
# Number of raspberries in the cluster
BASE_IP = '192.168.8.2'
NODE_COUNT = 4
# prefix for the node names.
# nodes will be named <prefix>-master, <prefix>-node1, <prefix>-node2, ...
NODE_PREFIX = 'gg'
USR_LOCAL_BIN=join('usr', 'local', 'bin')
SETUP_NODE_SH=join(USR_LOCAL_BIN, 'setup_node.sh')
CFSSL_PROGS_SHA256 = """
0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle
48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo
4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey
71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan
11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl
e138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson
dac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle
d53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca
"""
# Shell script to setup the necessary software for kubernetes
# FIXME - howto add a static IP
# TODO - add static certificates
# TODO - add kubeadm call for master
PKG_SETUP = """\
#!/bin/sh
setup_params="$1"
setup_machine_id() {
sudo rm -f /etc/machine-id /var/lib/dbus/machine-id
sudo dbus-uuidgen --ensure=/etc/machine-id
}
setup_static_ip() {
}
set -e
nodename=`awk -F= '/^nodename=/ { print $2 }' "$setup_params"`
ipaddress=`awk -F= '/^ip=/ { print $2 }' "$setup_params"`
sudo hostname "$nodename"
setup_static_ip "$ipaddress"
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update -y
sudo apt-get install -y policykit-1 docker-ce
setup_machine_id
sudo dphys-swapfile swapoff
sudo dphys-swapfile uninstall
sudo update-rc.d dphys-swapfile remove
echo "Getting kubernetes packages"
sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
sudo /usr/bin/raspi-config --expand-rootfs
"""
SETUP_SCRIPT = """
if [[ -e /boot/setup.txt ]] ; then
tmp=`mktemp`
mv /boot/setup.txt "$tmp"
sh -x "/%s" "$tmp" >/boot/setup.log 2>&1
rm -f "$tmp"
fi
""" % SETUP_NODE_SH
def absjoin(*params):
return abspath(join(*params))
# FIXME - add comments to the methods
class ClusterSetup:
def __call__(self, archive, node_names, targetdir, ipbase):
targetinfo = stat(targetdir)
with self._mktemp():
info('Download cfssl')
cfssldir = abspath('cfssl')
self._download_cfssl(cfssldir)
ipaddress = ipbase
for name in node_names:
node_image = absjoin(targetdir, '%s.img' % name)
info('prepare image for node %s in %s' % (name, node_image))
info('Unpacking archive %s' % archive)
self._unzip(archive, node_image)
try:
self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir)
except Exception as e:
unlink(node_image)
raise
chown(node_image, targetinfo.st_uid, targetinfo.st_gid)
ipaddress = self._increment_ip(ipaddress)
info('done')
def _setup_cgroups(self):
debug('setup cgrops in %s' % getcwd())
with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:
cmdline.write('cgroup_enable=cpuset cgroup_memory=1')
def _enable_ssh(self):
debug('enable ssh in %s' % getcwd())
with open(absjoin('boot', 'ssh'), 'w') as ssh:
ssh.write('')
def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):
with self._mount(image):
self._setup_nodename(master, nodename)
self._enable_ssh()
self._setup_cgroups()
debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))
self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))
self._init_first_boot(ipadddress, nodename)
def _copytree(self, srcdir, dstdir):
for f in listdir(srcdir):
copy2(absjoin(srcdir, f), dstdir)
def _setup_nodename(self, master, nodename):
debug('setup nodename %s in %s' % (nodename, getcwd()))
with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:
print(nodename, file=hostname)
with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:
print('127.0.1.1 %(nodename)s' % locals(), file=hosts)
if nodename != master:
print('10.0.0.1 %(master)s' % locals(), file=hosts)
def _init_first_boot(self, ipadddress, nodename):
debug('Prepare first boot in %s' % getcwd())
with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:
self.create_setup_script(fname)
with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:
self.setup_rclocal(rclocal)
self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename)
def create_setup_script(self, setup_node_sh):
with open(setup_node_sh, 'x') as setup_node:
print(PKG_SETUP % locals(), file=setup_node)
def setup_rclocal(self, rc_local):
with open(rc_local, 'r+') as script:
script.write(self._edit(script.read(), SETUP_SCRIPT))
def _create_setup_txt(self, fname, ipadddress, nodename):
with open(fname, 'w') as setup:
print('nodename=%s' % nodename, file=setup)
print('ip=%s' % ipadddress, file=setup)
def _edit(self, setup_script, setup_node_sh):
lines = [l.rstrip() for l in setup_script.splitlines()]
if 'exit 0' in lines:
exit_line = lines.index('exit 0')
lines.insert(exit_line, setup_node_sh)
else:
lines.append(setup_node_sh)
lines.append('exit 0')
return '\n'.join(lines)
def _download_cfssl(self, dstdir):
if not isdir(dstdir):
makedirs(dstdir)
for line in CFSSL_PROGS_SHA256.splitlines():
if line:
checksum, fname = line.split()
dstfile = absjoin(dstdir, fname)
self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum)
chmod(dstfile, 0o755)
def _download(self, url, dstfile, checksum):
request.urlretrieve(url, dstfile)
m = sha256()
with open(dstfile, 'rb') as f:
hash = m.update(f.read())
if checksum != m.hexdigest():
raise RuntimeError('Checksum of %s does not match!' % dstfile)
@staticmethod
def _unzip(archive, dst_image):
with ZipFile(archive) as image_archive:
for name in image_archive.namelist():
if name.endswith('.img'):
image = image_archive.extract(name, dirname(dst_image))
if isfile(dst_image):
unlink(dst_image)
rename(image, dst_image)
return dst_image
raise RuntimeError('No image file contained in archive %s' % archive)
@contextmanager
def _mktemp(self):
here = getcwd()
tempdir = mkdtemp()
try:
chdir(tempdir)
yield tempdir, here
finally:
chdir(here)
rmtree(tempdir)
@contextmanager
def _mount(self, image):
with self._kpartx(abspath(image)) as nodes:
with self._mktemp() as (here, cwd):
for d in nodes.keys():
mkdir(d)
boot = abspath('boot')
system = abspath('system')
with self._mounted(nodes['boot'], boot) as boot:
with self._mounted(nodes['system'], system) as system:
chdir(here)
yield boot, system
@contextmanager
def _kpartx(self, image):
output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image), universal_newlines=True)
# $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img
# add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192
# add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304
try:
nodes = []
for l in output.splitlines():
if l:
fields = l.split()
nodes.append((fields[2], fields[5]))
assert len(nodes) == 2
# sort nodes by size - the smaller node is 'boot'
nodes.sort(key=lambda t: t[1], reverse=True)
yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': '/dev/mapper/%s' % nodes[1][0]}
finally:
check_call(('sudo', 'kpartx', '-d', '-s', image))
@contextmanager
def _mounted(self, mapping, mountpoint):
try:
debug('mount %s on %s' % (mapping, mountpoint))
check_call(('sudo', 'mount', mapping, mountpoint))
yield mountpoint
finally:
check_call(('sudo', 'umount', mountpoint))
@contextmanager
def _executable(self, param):
yield param
chmod(param, 0o755)
def _increment_ip(self, ipbase):
octets = [int(o) for o in ipbase.split('.')]
octets[3] += 1
return '.'.join([str(o) for o in octets])
def _check_ip(param):
octets = [int(o) for o in param.split('.')]
for o in octets:
if 0 <= o <= 255:
continue
raise RuntimeError('Invalid IP address: %s' % param)
return param
def main(*args):
targetdir = getcwd() if len(args) < 4 else args[3]
nodenames = prepare_names(
NODE_COUNT if len(args) < 2 else int(args[1]),
NODE_PREFIX if len(args) < 3 else args[2])
ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])
raspbian_archive = abspath(args[0])
setup = ClusterSetup()
setup(raspbian_archive, nodenames, targetdir, ipaddress)
if __name__ == '__main__':
def prepare_names(num_nodes, prefix):
result = [prefix + '-master']
for i in range(1, num_nodes):
result += ['%s-node-%d' % (prefix, i)]
return tuple(result)
if len(sys.argv) < 2:
exit(__doc__)
if geteuid() != 0:
exit("You must be root to use this software")
basicConfig(level=DEBUG)
try:
main(*sys.argv[1:])
except RuntimeError as e:
exit('\n'.join((str(e), __doc__)))
|
[
"#!/usr/bin/env python3\n\"\"\"\nUsage: setup-cluster-images image-archive [num_nodes [targetdir]]\n image-archive - zip file as downloaded from raspberry-pi.org\n num_nodes - number of nodes in the cluster [4]\n node_prefix - prefix for the cluster nodes [gg]\n targetdir - destination directory [current directory]\n\"\"\"\nimport sys\nfrom _sha256 import sha256\nfrom contextlib import contextmanager\nfrom logging import info, basicConfig, debug, DEBUG\nfrom os import chdir, getcwd, makedirs, mkdir, geteuid, chmod, chown, stat, unlink, \\\n listdir, rename\nfrom shutil import rmtree, copy2\nfrom subprocess import check_output, check_call\nfrom tempfile import mkdtemp\nfrom urllib import request\nfrom zipfile import ZipFile\n\nfrom os.path import join, abspath, isdir, dirname, isfile\n\n# Number of raspberries in the cluster\nBASE_IP = '192.168.8.2'\nNODE_COUNT = 4\n# prefix for the node names.\n# nodes will be named <prefix>-master, <prefix>-node1, <prefix>-node2, ...\nNODE_PREFIX = 'gg'\n\nUSR_LOCAL_BIN=join('usr', 'local', 'bin')\nSETUP_NODE_SH=join(USR_LOCAL_BIN, 'setup_node.sh')\n\nCFSSL_PROGS_SHA256 = \"\"\"\n0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle\n48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo\n4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey\n71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan\n11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl\ne138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson\ndac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle\nd53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca\n\"\"\"\n\n# Shell script to setup the necessary software for kubernetes\n# FIXME - howto add a static IP\n# TODO - add static certificates\n# TODO - add kubeadm call for master\nPKG_SETUP = \"\"\"\\\n#!/bin/sh\nsetup_params=\"$1\"\n\nsetup_machine_id() {\n sudo rm -f /etc/machine-id /var/lib/dbus/machine-id\n sudo dbus-uuidgen --ensure=/etc/machine-id\n}\n\nsetup_static_ip() {\n}\n\nset -e\nnodename=`awk -F= '/^nodename=/ { print $2 }' \"$setup_params\"`\nipaddress=`awk -F= '/^ip=/ { print $2 }' \"$setup_params\"`\nsudo hostname \"$nodename\"\nsetup_static_ip \"$ipaddress\"\ncurl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\necho \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" | sudo tee /etc/apt/sources.list.d/kubernetes.list\nsudo apt-get update -y\nsudo apt-get install -y policykit-1 docker-ce\nsetup_machine_id\nsudo dphys-swapfile swapoff\nsudo dphys-swapfile uninstall\nsudo update-rc.d dphys-swapfile remove\necho \"Getting kubernetes packages\"\nsudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni\nsudo /usr/bin/raspi-config --expand-rootfs\n\"\"\"\n\nSETUP_SCRIPT = \"\"\"\nif [[ -e /boot/setup.txt ]] ; then\n tmp=`mktemp`\n mv /boot/setup.txt \"$tmp\"\n \n sh -x \"/%s\" \"$tmp\" >/boot/setup.log 2>&1\n rm -f \"$tmp\"\nfi\n\"\"\" % SETUP_NODE_SH\n\n\ndef absjoin(*params):\n return abspath(join(*params))\n\n\n# FIXME - add comments to the methods\nclass ClusterSetup:\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum)\n chmod(dstfile, 0o755)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n\n rename(image, dst_image)\n return dst_image\n\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image), universal_newlines=True)\n # $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img\n # add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192\n # add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n\n assert len(nodes) == 2\n # sort nodes by size - the smaller node is 'boot'\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 0o755)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\ndef _check_ip(param):\n octets = [int(o) for o in param.split('.')]\n for o in octets:\n if 0 <= o <= 255:\n continue\n\n raise RuntimeError('Invalid IP address: %s' % param)\n\n return param\n\n\ndef main(*args):\n targetdir = getcwd() if len(args) < 4 else args[3]\n nodenames = prepare_names(\n NODE_COUNT if len(args) < 2 else int(args[1]),\n NODE_PREFIX if len(args) < 3 else args[2])\n ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])\n raspbian_archive = abspath(args[0])\n setup = ClusterSetup()\n setup(raspbian_archive, nodenames, targetdir, ipaddress)\n\n\nif __name__ == '__main__':\n def prepare_names(num_nodes, prefix):\n result = [prefix + '-master']\n for i in range(1, num_nodes):\n result += ['%s-node-%d' % (prefix, i)]\n\n return tuple(result)\n\n if len(sys.argv) < 2:\n exit(__doc__)\n\n if geteuid() != 0:\n exit(\"You must be root to use this software\")\n\n basicConfig(level=DEBUG)\n try:\n main(*sys.argv[1:])\n except RuntimeError as e:\n exit('\\n'.join((str(e), __doc__)))\n",
"<docstring token>\nimport sys\nfrom _sha256 import sha256\nfrom contextlib import contextmanager\nfrom logging import info, basicConfig, debug, DEBUG\nfrom os import chdir, getcwd, makedirs, mkdir, geteuid, chmod, chown, stat, unlink, listdir, rename\nfrom shutil import rmtree, copy2\nfrom subprocess import check_output, check_call\nfrom tempfile import mkdtemp\nfrom urllib import request\nfrom zipfile import ZipFile\nfrom os.path import join, abspath, isdir, dirname, isfile\nBASE_IP = '192.168.8.2'\nNODE_COUNT = 4\nNODE_PREFIX = 'gg'\nUSR_LOCAL_BIN = join('usr', 'local', 'bin')\nSETUP_NODE_SH = join(USR_LOCAL_BIN, 'setup_node.sh')\nCFSSL_PROGS_SHA256 = \"\"\"\n0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle\n48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo\n4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey\n71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan\n11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl\ne138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson\ndac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle\nd53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca\n\"\"\"\nPKG_SETUP = \"\"\"#!/bin/sh\nsetup_params=\"$1\"\n\nsetup_machine_id() {\n sudo rm -f /etc/machine-id /var/lib/dbus/machine-id\n sudo dbus-uuidgen --ensure=/etc/machine-id\n}\n\nsetup_static_ip() {\n}\n\nset -e\nnodename=`awk -F= '/^nodename=/ { print $2 }' \"$setup_params\"`\nipaddress=`awk -F= '/^ip=/ { print $2 }' \"$setup_params\"`\nsudo hostname \"$nodename\"\nsetup_static_ip \"$ipaddress\"\ncurl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\necho \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" | sudo tee /etc/apt/sources.list.d/kubernetes.list\nsudo apt-get update -y\nsudo apt-get install -y policykit-1 docker-ce\nsetup_machine_id\nsudo dphys-swapfile swapoff\nsudo dphys-swapfile uninstall\nsudo update-rc.d dphys-swapfile remove\necho \"Getting kubernetes packages\"\nsudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni\nsudo /usr/bin/raspi-config --expand-rootfs\n\"\"\"\nSETUP_SCRIPT = (\n \"\"\"\nif [[ -e /boot/setup.txt ]] ; then\n tmp=`mktemp`\n mv /boot/setup.txt \"$tmp\"\n \n sh -x \"/%s\" \"$tmp\" >/boot/setup.log 2>&1\n rm -f \"$tmp\"\nfi\n\"\"\"\n % SETUP_NODE_SH)\n\n\ndef absjoin(*params):\n return abspath(join(*params))\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\ndef _check_ip(param):\n octets = [int(o) for o in param.split('.')]\n for o in octets:\n if 0 <= o <= 255:\n continue\n raise RuntimeError('Invalid IP address: %s' % param)\n return param\n\n\ndef main(*args):\n targetdir = getcwd() if len(args) < 4 else args[3]\n nodenames = prepare_names(NODE_COUNT if len(args) < 2 else int(args[1]),\n NODE_PREFIX if len(args) < 3 else args[2])\n ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])\n raspbian_archive = abspath(args[0])\n setup = ClusterSetup()\n setup(raspbian_archive, nodenames, targetdir, ipaddress)\n\n\nif __name__ == '__main__':\n\n def prepare_names(num_nodes, prefix):\n result = [prefix + '-master']\n for i in range(1, num_nodes):\n result += ['%s-node-%d' % (prefix, i)]\n return tuple(result)\n if len(sys.argv) < 2:\n exit(__doc__)\n if geteuid() != 0:\n exit('You must be root to use this software')\n basicConfig(level=DEBUG)\n try:\n main(*sys.argv[1:])\n except RuntimeError as e:\n exit('\\n'.join((str(e), __doc__)))\n",
"<docstring token>\n<import token>\nBASE_IP = '192.168.8.2'\nNODE_COUNT = 4\nNODE_PREFIX = 'gg'\nUSR_LOCAL_BIN = join('usr', 'local', 'bin')\nSETUP_NODE_SH = join(USR_LOCAL_BIN, 'setup_node.sh')\nCFSSL_PROGS_SHA256 = \"\"\"\n0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle\n48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo\n4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey\n71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan\n11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl\ne138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson\ndac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle\nd53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca\n\"\"\"\nPKG_SETUP = \"\"\"#!/bin/sh\nsetup_params=\"$1\"\n\nsetup_machine_id() {\n sudo rm -f /etc/machine-id /var/lib/dbus/machine-id\n sudo dbus-uuidgen --ensure=/etc/machine-id\n}\n\nsetup_static_ip() {\n}\n\nset -e\nnodename=`awk -F= '/^nodename=/ { print $2 }' \"$setup_params\"`\nipaddress=`awk -F= '/^ip=/ { print $2 }' \"$setup_params\"`\nsudo hostname \"$nodename\"\nsetup_static_ip \"$ipaddress\"\ncurl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\necho \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" | sudo tee /etc/apt/sources.list.d/kubernetes.list\nsudo apt-get update -y\nsudo apt-get install -y policykit-1 docker-ce\nsetup_machine_id\nsudo dphys-swapfile swapoff\nsudo dphys-swapfile uninstall\nsudo update-rc.d dphys-swapfile remove\necho \"Getting kubernetes packages\"\nsudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni\nsudo /usr/bin/raspi-config --expand-rootfs\n\"\"\"\nSETUP_SCRIPT = (\n \"\"\"\nif [[ -e /boot/setup.txt ]] ; then\n tmp=`mktemp`\n mv /boot/setup.txt \"$tmp\"\n \n sh -x \"/%s\" \"$tmp\" >/boot/setup.log 2>&1\n rm -f \"$tmp\"\nfi\n\"\"\"\n % SETUP_NODE_SH)\n\n\ndef absjoin(*params):\n return abspath(join(*params))\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\ndef _check_ip(param):\n octets = [int(o) for o in param.split('.')]\n for o in octets:\n if 0 <= o <= 255:\n continue\n raise RuntimeError('Invalid IP address: %s' % param)\n return param\n\n\ndef main(*args):\n targetdir = getcwd() if len(args) < 4 else args[3]\n nodenames = prepare_names(NODE_COUNT if len(args) < 2 else int(args[1]),\n NODE_PREFIX if len(args) < 3 else args[2])\n ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])\n raspbian_archive = abspath(args[0])\n setup = ClusterSetup()\n setup(raspbian_archive, nodenames, targetdir, ipaddress)\n\n\nif __name__ == '__main__':\n\n def prepare_names(num_nodes, prefix):\n result = [prefix + '-master']\n for i in range(1, num_nodes):\n result += ['%s-node-%d' % (prefix, i)]\n return tuple(result)\n if len(sys.argv) < 2:\n exit(__doc__)\n if geteuid() != 0:\n exit('You must be root to use this software')\n basicConfig(level=DEBUG)\n try:\n main(*sys.argv[1:])\n except RuntimeError as e:\n exit('\\n'.join((str(e), __doc__)))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef absjoin(*params):\n return abspath(join(*params))\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\ndef _check_ip(param):\n octets = [int(o) for o in param.split('.')]\n for o in octets:\n if 0 <= o <= 255:\n continue\n raise RuntimeError('Invalid IP address: %s' % param)\n return param\n\n\ndef main(*args):\n targetdir = getcwd() if len(args) < 4 else args[3]\n nodenames = prepare_names(NODE_COUNT if len(args) < 2 else int(args[1]),\n NODE_PREFIX if len(args) < 3 else args[2])\n ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])\n raspbian_archive = abspath(args[0])\n setup = ClusterSetup()\n setup(raspbian_archive, nodenames, targetdir, ipaddress)\n\n\nif __name__ == '__main__':\n\n def prepare_names(num_nodes, prefix):\n result = [prefix + '-master']\n for i in range(1, num_nodes):\n result += ['%s-node-%d' % (prefix, i)]\n return tuple(result)\n if len(sys.argv) < 2:\n exit(__doc__)\n if geteuid() != 0:\n exit('You must be root to use this software')\n basicConfig(level=DEBUG)\n try:\n main(*sys.argv[1:])\n except RuntimeError as e:\n exit('\\n'.join((str(e), __doc__)))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef absjoin(*params):\n return abspath(join(*params))\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\ndef _check_ip(param):\n octets = [int(o) for o in param.split('.')]\n for o in octets:\n if 0 <= o <= 255:\n continue\n raise RuntimeError('Invalid IP address: %s' % param)\n return param\n\n\ndef main(*args):\n targetdir = getcwd() if len(args) < 4 else args[3]\n nodenames = prepare_names(NODE_COUNT if len(args) < 2 else int(args[1]),\n NODE_PREFIX if len(args) < 3 else args[2])\n ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])\n raspbian_archive = abspath(args[0])\n setup = ClusterSetup()\n setup(raspbian_archive, nodenames, targetdir, ipaddress)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\ndef _check_ip(param):\n octets = [int(o) for o in param.split('.')]\n for o in octets:\n if 0 <= o <= 255:\n continue\n raise RuntimeError('Invalid IP address: %s' % param)\n return param\n\n\ndef main(*args):\n targetdir = getcwd() if len(args) < 4 else args[3]\n nodenames = prepare_names(NODE_COUNT if len(args) < 2 else int(args[1]),\n NODE_PREFIX if len(args) < 3 else args[2])\n ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])\n raspbian_archive = abspath(args[0])\n setup = ClusterSetup()\n setup(raspbian_archive, nodenames, targetdir, ipaddress)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\ndef _check_ip(param):\n octets = [int(o) for o in param.split('.')]\n for o in octets:\n if 0 <= o <= 255:\n continue\n raise RuntimeError('Invalid IP address: %s' % param)\n return param\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n\n def _increment_ip(self, ipbase):\n octets = [int(o) for o in ipbase.split('.')]\n octets[3] += 1\n return '.'.join([str(o) for o in octets])\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n\n @contextmanager\n def _executable(self, param):\n yield param\n chmod(param, 493)\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n\n def _setup_cgroups(self):\n debug('setup cgrops in %s' % getcwd())\n with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:\n cmdline.write('cgroup_enable=cpuset cgroup_memory=1')\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n\n def _setup_nodename(self, master, nodename):\n debug('setup nodename %s in %s' % (nodename, getcwd()))\n with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:\n print(nodename, file=hostname)\n with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:\n print('127.0.1.1 %(nodename)s' % locals(), file=hosts)\n if nodename != master:\n print('10.0.0.1 %(master)s' % locals(), file=hosts)\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n\n def setup_rclocal(self, rc_local):\n with open(rc_local, 'r+') as script:\n script.write(self._edit(script.read(), SETUP_SCRIPT))\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n\n @staticmethod\n def _unzip(archive, dst_image):\n with ZipFile(archive) as image_archive:\n for name in image_archive.namelist():\n if name.endswith('.img'):\n image = image_archive.extract(name, dirname(dst_image))\n if isfile(dst_image):\n unlink(dst_image)\n rename(image, dst_image)\n return dst_image\n raise RuntimeError('No image file contained in archive %s' % archive)\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n\n def _copytree(self, srcdir, dstdir):\n for f in listdir(srcdir):\n copy2(absjoin(srcdir, f), dstdir)\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n <function token>\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n <function token>\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n <function token>\n\n @contextmanager\n def _mktemp(self):\n here = getcwd()\n tempdir = mkdtemp()\n try:\n chdir(tempdir)\n yield tempdir, here\n finally:\n chdir(here)\n rmtree(tempdir)\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n\n def __call__(self, archive, node_names, targetdir, ipbase):\n targetinfo = stat(targetdir)\n with self._mktemp():\n info('Download cfssl')\n cfssldir = abspath('cfssl')\n self._download_cfssl(cfssldir)\n ipaddress = ipbase\n for name in node_names:\n node_image = absjoin(targetdir, '%s.img' % name)\n info('prepare image for node %s in %s' % (name, node_image))\n info('Unpacking archive %s' % archive)\n self._unzip(archive, node_image)\n try:\n self._prepare_node_image(node_image, name, node_names[0\n ], ipaddress, cfssldir)\n except Exception as e:\n unlink(node_image)\n raise\n chown(node_image, targetinfo.st_uid, targetinfo.st_gid)\n ipaddress = self._increment_ip(ipaddress)\n info('done')\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n <function token>\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n <function token>\n <function token>\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n\n def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):\n with self._mount(image):\n self._setup_nodename(master, nodename)\n self._enable_ssh()\n self._setup_cgroups()\n debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))\n self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))\n self._init_first_boot(ipadddress, nodename)\n <function token>\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n <function token>\n <function token>\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n\n def _download(self, url, dstfile, checksum):\n request.urlretrieve(url, dstfile)\n m = sha256()\n with open(dstfile, 'rb') as f:\n hash = m.update(f.read())\n if checksum != m.hexdigest():\n raise RuntimeError('Checksum of %s does not match!' % dstfile)\n <function token>\n <function token>\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n\n def _init_first_boot(self, ipadddress, nodename):\n debug('Prepare first boot in %s' % getcwd())\n with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:\n self.create_setup_script(fname)\n with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:\n self.setup_rclocal(rclocal)\n self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress,\n nodename)\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n\n def _edit(self, setup_script, setup_node_sh):\n lines = [l.rstrip() for l in setup_script.splitlines()]\n if 'exit 0' in lines:\n exit_line = lines.index('exit 0')\n lines.insert(exit_line, setup_node_sh)\n else:\n lines.append(setup_node_sh)\n lines.append('exit 0')\n return '\\n'.join(lines)\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n\n def create_setup_script(self, setup_node_sh):\n with open(setup_node_sh, 'x') as setup_node:\n print(PKG_SETUP % locals(), file=setup_node)\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n <function token>\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n <function token>\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _mount(self, image):\n with self._kpartx(abspath(image)) as nodes:\n with self._mktemp() as (here, cwd):\n for d in nodes.keys():\n mkdir(d)\n boot = abspath('boot')\n system = abspath('system')\n with self._mounted(nodes['boot'], boot) as boot:\n with self._mounted(nodes['system'], system) as system:\n chdir(here)\n yield boot, system\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n <function token>\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _kpartx(self, image):\n output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),\n universal_newlines=True)\n try:\n nodes = []\n for l in output.splitlines():\n if l:\n fields = l.split()\n nodes.append((fields[2], fields[5]))\n assert len(nodes) == 2\n nodes.sort(key=lambda t: t[1], reverse=True)\n yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': \n '/dev/mapper/%s' % nodes[1][0]}\n finally:\n check_call(('sudo', 'kpartx', '-d', '-s', image))\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _create_setup_txt(self, fname, ipadddress, nodename):\n with open(fname, 'w') as setup:\n print('nodename=%s' % nodename, file=setup)\n print('ip=%s' % ipadddress, file=setup)\n <function token>\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _download_cfssl(self, dstdir):\n if not isdir(dstdir):\n makedirs(dstdir)\n for line in CFSSL_PROGS_SHA256.splitlines():\n if line:\n checksum, fname = line.split()\n dstfile = absjoin(dstdir, fname)\n self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' %\n fname, dstfile, checksum)\n chmod(dstfile, 493)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @contextmanager\n def _mounted(self, mapping, mountpoint):\n try:\n debug('mount %s on %s' % (mapping, mountpoint))\n check_call(('sudo', 'mount', mapping, mountpoint))\n yield mountpoint\n finally:\n check_call(('sudo', 'umount', mountpoint))\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n\n def _enable_ssh(self):\n debug('enable ssh in %s' % getcwd())\n with open(absjoin('boot', 'ssh'), 'w') as ssh:\n ssh.write('')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\nclass ClusterSetup:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<class token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,668 |
25712c926ee372f8cf971e388a1764bc9b9ce550
|
word = input()
ans = True
length = len(word)
if length % 2 == 0:
string=""
pal1 = list(word[0:(length//2)])
pal1.reverse()
pal2 = list(word[length//2:])
if pal1 == pal2:
ans = True
else:
ans = False
else:
pal1 = list(word[0:(length//2)+1])
pal1.reverse()
pal2 = list(word[length//2:])
if pal1 == pal2:
ans = True
else:
ans = False
if ans == True:
print("Yes")
else:
print("No")
|
[
"word = input()\nans = True\nlength = len(word)\nif length % 2 == 0:\n string=\"\"\n pal1 = list(word[0:(length//2)])\n pal1.reverse()\n pal2 = list(word[length//2:])\n if pal1 == pal2:\n ans = True\n else:\n ans = False\n \nelse:\n pal1 = list(word[0:(length//2)+1])\n pal1.reverse()\n pal2 = list(word[length//2:])\n if pal1 == pal2:\n ans = True\n else:\n ans = False\n\n\nif ans == True:\n print(\"Yes\")\nelse:\n print(\"No\")\n",
"word = input()\nans = True\nlength = len(word)\nif length % 2 == 0:\n string = ''\n pal1 = list(word[0:length // 2])\n pal1.reverse()\n pal2 = list(word[length // 2:])\n if pal1 == pal2:\n ans = True\n else:\n ans = False\nelse:\n pal1 = list(word[0:length // 2 + 1])\n pal1.reverse()\n pal2 = list(word[length // 2:])\n if pal1 == pal2:\n ans = True\n else:\n ans = False\nif ans == True:\n print('Yes')\nelse:\n print('No')\n",
"<assignment token>\nif length % 2 == 0:\n string = ''\n pal1 = list(word[0:length // 2])\n pal1.reverse()\n pal2 = list(word[length // 2:])\n if pal1 == pal2:\n ans = True\n else:\n ans = False\nelse:\n pal1 = list(word[0:length // 2 + 1])\n pal1.reverse()\n pal2 = list(word[length // 2:])\n if pal1 == pal2:\n ans = True\n else:\n ans = False\nif ans == True:\n print('Yes')\nelse:\n print('No')\n",
"<assignment token>\n<code token>\n"
] | false |
99,669 |
371d37a2f133021412ac966e7e6bd23d4ef9c322
|
from typing import Tuple
from utils.criteria import least_confidence, entropy, margin_sampling
import numpy as np
def get_high_confidence_samples(pred_prob: np.ndarray,
delta: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Select high confidence samples from `D^U` whose entropy is smaller than
the threshold
`delta`.
Parameters
----------
pred_prob : np.ndarray
prediction probability of x_i with dimension (batch x n_class)
delta : float
threshold
Returns
-------
np.array with dimension (K x 1) containing the indices of the K
most informative samples.
np.array with dimension (K x 1) containing the predicted classes of the
k most informative samples
"""
_, eni = entropy(pred_prob=pred_prob, k=len(pred_prob))
hcs = eni[eni[:, 2] < delta]
return hcs[:, 0].astype(np.int32), hcs[:, 1].astype(np.int32)
def get_uncertain_samples(pred_prob: np.ndarray, k: int,
criteria: str) -> Tuple[np.ndarray, np.ndarray]:
"""
Get the K most informative samples based on the criteria
Parameters
----------
pred_prob : np.ndarray
prediction probability of x_i with dimension (batch x n_class)
k: int
criteria: str
`cl` : least_confidence()
`ms` : margin_sampling()
`en` : entropy
Returns
-------
tuple(np.ndarray, np.ndarray)
"""
if criteria == 'cl':
uncertain_samples = least_confidence(pred_prob=pred_prob, k=k)
elif criteria == 'ms':
uncertain_samples = margin_sampling(pred_prob=pred_prob, k=k)
elif criteria == 'en':
uncertain_samples = entropy(pred_prob=pred_prob, k=k)
else:
raise ValueError('criteria {} not found !'.format(criteria))
return uncertain_samples
|
[
"from typing import Tuple\n\nfrom utils.criteria import least_confidence, entropy, margin_sampling\nimport numpy as np\n\n\ndef get_high_confidence_samples(pred_prob: np.ndarray,\n delta: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Select high confidence samples from `D^U` whose entropy is smaller than\n the threshold\n `delta`.\n\n Parameters\n ----------\n pred_prob : np.ndarray\n prediction probability of x_i with dimension (batch x n_class)\n delta : float\n threshold\n\n Returns\n -------\n np.array with dimension (K x 1) containing the indices of the K\n most informative samples.\n np.array with dimension (K x 1) containing the predicted classes of the\n k most informative samples\n \"\"\"\n _, eni = entropy(pred_prob=pred_prob, k=len(pred_prob))\n hcs = eni[eni[:, 2] < delta]\n return hcs[:, 0].astype(np.int32), hcs[:, 1].astype(np.int32)\n\n\ndef get_uncertain_samples(pred_prob: np.ndarray, k: int,\n criteria: str) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Get the K most informative samples based on the criteria\n Parameters\n ----------\n pred_prob : np.ndarray\n prediction probability of x_i with dimension (batch x n_class)\n k: int\n criteria: str\n `cl` : least_confidence()\n `ms` : margin_sampling()\n `en` : entropy\n\n Returns\n -------\n tuple(np.ndarray, np.ndarray)\n \"\"\"\n if criteria == 'cl':\n uncertain_samples = least_confidence(pred_prob=pred_prob, k=k)\n elif criteria == 'ms':\n uncertain_samples = margin_sampling(pred_prob=pred_prob, k=k)\n elif criteria == 'en':\n uncertain_samples = entropy(pred_prob=pred_prob, k=k)\n else:\n raise ValueError('criteria {} not found !'.format(criteria))\n return uncertain_samples\n",
"from typing import Tuple\nfrom utils.criteria import least_confidence, entropy, margin_sampling\nimport numpy as np\n\n\ndef get_high_confidence_samples(pred_prob: np.ndarray, delta: float) ->Tuple[\n np.ndarray, np.ndarray]:\n \"\"\"\n Select high confidence samples from `D^U` whose entropy is smaller than\n the threshold\n `delta`.\n\n Parameters\n ----------\n pred_prob : np.ndarray\n prediction probability of x_i with dimension (batch x n_class)\n delta : float\n threshold\n\n Returns\n -------\n np.array with dimension (K x 1) containing the indices of the K\n most informative samples.\n np.array with dimension (K x 1) containing the predicted classes of the\n k most informative samples\n \"\"\"\n _, eni = entropy(pred_prob=pred_prob, k=len(pred_prob))\n hcs = eni[eni[:, 2] < delta]\n return hcs[:, 0].astype(np.int32), hcs[:, 1].astype(np.int32)\n\n\ndef get_uncertain_samples(pred_prob: np.ndarray, k: int, criteria: str\n ) ->Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Get the K most informative samples based on the criteria\n Parameters\n ----------\n pred_prob : np.ndarray\n prediction probability of x_i with dimension (batch x n_class)\n k: int\n criteria: str\n `cl` : least_confidence()\n `ms` : margin_sampling()\n `en` : entropy\n\n Returns\n -------\n tuple(np.ndarray, np.ndarray)\n \"\"\"\n if criteria == 'cl':\n uncertain_samples = least_confidence(pred_prob=pred_prob, k=k)\n elif criteria == 'ms':\n uncertain_samples = margin_sampling(pred_prob=pred_prob, k=k)\n elif criteria == 'en':\n uncertain_samples = entropy(pred_prob=pred_prob, k=k)\n else:\n raise ValueError('criteria {} not found !'.format(criteria))\n return uncertain_samples\n",
"<import token>\n\n\ndef get_high_confidence_samples(pred_prob: np.ndarray, delta: float) ->Tuple[\n np.ndarray, np.ndarray]:\n \"\"\"\n Select high confidence samples from `D^U` whose entropy is smaller than\n the threshold\n `delta`.\n\n Parameters\n ----------\n pred_prob : np.ndarray\n prediction probability of x_i with dimension (batch x n_class)\n delta : float\n threshold\n\n Returns\n -------\n np.array with dimension (K x 1) containing the indices of the K\n most informative samples.\n np.array with dimension (K x 1) containing the predicted classes of the\n k most informative samples\n \"\"\"\n _, eni = entropy(pred_prob=pred_prob, k=len(pred_prob))\n hcs = eni[eni[:, 2] < delta]\n return hcs[:, 0].astype(np.int32), hcs[:, 1].astype(np.int32)\n\n\ndef get_uncertain_samples(pred_prob: np.ndarray, k: int, criteria: str\n ) ->Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Get the K most informative samples based on the criteria\n Parameters\n ----------\n pred_prob : np.ndarray\n prediction probability of x_i with dimension (batch x n_class)\n k: int\n criteria: str\n `cl` : least_confidence()\n `ms` : margin_sampling()\n `en` : entropy\n\n Returns\n -------\n tuple(np.ndarray, np.ndarray)\n \"\"\"\n if criteria == 'cl':\n uncertain_samples = least_confidence(pred_prob=pred_prob, k=k)\n elif criteria == 'ms':\n uncertain_samples = margin_sampling(pred_prob=pred_prob, k=k)\n elif criteria == 'en':\n uncertain_samples = entropy(pred_prob=pred_prob, k=k)\n else:\n raise ValueError('criteria {} not found !'.format(criteria))\n return uncertain_samples\n",
"<import token>\n<function token>\n\n\ndef get_uncertain_samples(pred_prob: np.ndarray, k: int, criteria: str\n ) ->Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Get the K most informative samples based on the criteria\n Parameters\n ----------\n pred_prob : np.ndarray\n prediction probability of x_i with dimension (batch x n_class)\n k: int\n criteria: str\n `cl` : least_confidence()\n `ms` : margin_sampling()\n `en` : entropy\n\n Returns\n -------\n tuple(np.ndarray, np.ndarray)\n \"\"\"\n if criteria == 'cl':\n uncertain_samples = least_confidence(pred_prob=pred_prob, k=k)\n elif criteria == 'ms':\n uncertain_samples = margin_sampling(pred_prob=pred_prob, k=k)\n elif criteria == 'en':\n uncertain_samples = entropy(pred_prob=pred_prob, k=k)\n else:\n raise ValueError('criteria {} not found !'.format(criteria))\n return uncertain_samples\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
99,670 |
2f45122a288f94a6f59997704b3ae16f557b266a
|
# coding=utf8
import requests
import json
import unittest
class CaseLogin(unittest.TestCase):
def setUp(self):
self.url = "http://182.61.33.241:8089/app/api/public/1.0/open/login"
def test_01_login(self):
response = requests.post(url=self.url, data=json.dumps({'operId': '15616699600', 'operPwd': '123456'}),
headers={'Content-Type': 'application/json'})
print(self.url)
print(response.status_code)
result = response.json()
print("打印登录结果:", result)
self.token = result["data"]["token"]
print("登录返回的token值:", self.token)
def test_02_home(self):
response = requests.get(url="http://182.61.33.241:8089/app/api/private/1.0/homePage/index",
headers={'Authorization':'Bearer '+self.token,'Content-Type': 'application/json'})
print("首页获取到的token值:", self.token)
print(response.status_code)
result = response.json()
return result
if __name__ == '__main__':
unittest.main()
|
[
"# coding=utf8\nimport requests\nimport json\nimport unittest\n\nclass CaseLogin(unittest.TestCase):\n def setUp(self):\n self.url = \"http://182.61.33.241:8089/app/api/public/1.0/open/login\"\n\n def test_01_login(self):\n response = requests.post(url=self.url, data=json.dumps({'operId': '15616699600', 'operPwd': '123456'}),\n headers={'Content-Type': 'application/json'})\n print(self.url)\n print(response.status_code)\n result = response.json()\n print(\"打印登录结果:\", result)\n self.token = result[\"data\"][\"token\"]\n print(\"登录返回的token值:\", self.token)\n\n def test_02_home(self):\n response = requests.get(url=\"http://182.61.33.241:8089/app/api/private/1.0/homePage/index\",\n headers={'Authorization':'Bearer '+self.token,'Content-Type': 'application/json'})\n print(\"首页获取到的token值:\", self.token)\n print(response.status_code)\n result = response.json()\n return result\n\nif __name__ == '__main__':\n unittest.main()\n\n\n\n\n\n",
"import requests\nimport json\nimport unittest\n\n\nclass CaseLogin(unittest.TestCase):\n\n def setUp(self):\n self.url = 'http://182.61.33.241:8089/app/api/public/1.0/open/login'\n\n def test_01_login(self):\n response = requests.post(url=self.url, data=json.dumps({'operId':\n '15616699600', 'operPwd': '123456'}), headers={'Content-Type':\n 'application/json'})\n print(self.url)\n print(response.status_code)\n result = response.json()\n print('打印登录结果:', result)\n self.token = result['data']['token']\n print('登录返回的token值:', self.token)\n\n def test_02_home(self):\n response = requests.get(url=\n 'http://182.61.33.241:8089/app/api/private/1.0/homePage/index',\n headers={'Authorization': 'Bearer ' + self.token,\n 'Content-Type': 'application/json'})\n print('首页获取到的token值:', self.token)\n print(response.status_code)\n result = response.json()\n return result\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass CaseLogin(unittest.TestCase):\n\n def setUp(self):\n self.url = 'http://182.61.33.241:8089/app/api/public/1.0/open/login'\n\n def test_01_login(self):\n response = requests.post(url=self.url, data=json.dumps({'operId':\n '15616699600', 'operPwd': '123456'}), headers={'Content-Type':\n 'application/json'})\n print(self.url)\n print(response.status_code)\n result = response.json()\n print('打印登录结果:', result)\n self.token = result['data']['token']\n print('登录返回的token值:', self.token)\n\n def test_02_home(self):\n response = requests.get(url=\n 'http://182.61.33.241:8089/app/api/private/1.0/homePage/index',\n headers={'Authorization': 'Bearer ' + self.token,\n 'Content-Type': 'application/json'})\n print('首页获取到的token值:', self.token)\n print(response.status_code)\n result = response.json()\n return result\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass CaseLogin(unittest.TestCase):\n\n def setUp(self):\n self.url = 'http://182.61.33.241:8089/app/api/public/1.0/open/login'\n\n def test_01_login(self):\n response = requests.post(url=self.url, data=json.dumps({'operId':\n '15616699600', 'operPwd': '123456'}), headers={'Content-Type':\n 'application/json'})\n print(self.url)\n print(response.status_code)\n result = response.json()\n print('打印登录结果:', result)\n self.token = result['data']['token']\n print('登录返回的token值:', self.token)\n\n def test_02_home(self):\n response = requests.get(url=\n 'http://182.61.33.241:8089/app/api/private/1.0/homePage/index',\n headers={'Authorization': 'Bearer ' + self.token,\n 'Content-Type': 'application/json'})\n print('首页获取到的token值:', self.token)\n print(response.status_code)\n result = response.json()\n return result\n\n\n<code token>\n",
"<import token>\n\n\nclass CaseLogin(unittest.TestCase):\n <function token>\n\n def test_01_login(self):\n response = requests.post(url=self.url, data=json.dumps({'operId':\n '15616699600', 'operPwd': '123456'}), headers={'Content-Type':\n 'application/json'})\n print(self.url)\n print(response.status_code)\n result = response.json()\n print('打印登录结果:', result)\n self.token = result['data']['token']\n print('登录返回的token值:', self.token)\n\n def test_02_home(self):\n response = requests.get(url=\n 'http://182.61.33.241:8089/app/api/private/1.0/homePage/index',\n headers={'Authorization': 'Bearer ' + self.token,\n 'Content-Type': 'application/json'})\n print('首页获取到的token值:', self.token)\n print(response.status_code)\n result = response.json()\n return result\n\n\n<code token>\n",
"<import token>\n\n\nclass CaseLogin(unittest.TestCase):\n <function token>\n <function token>\n\n def test_02_home(self):\n response = requests.get(url=\n 'http://182.61.33.241:8089/app/api/private/1.0/homePage/index',\n headers={'Authorization': 'Bearer ' + self.token,\n 'Content-Type': 'application/json'})\n print('首页获取到的token值:', self.token)\n print(response.status_code)\n result = response.json()\n return result\n\n\n<code token>\n",
"<import token>\n\n\nclass CaseLogin(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
99,671 |
caf934de559ceac760d0a70173b71ebcf86ff7bc
|
"""
Sub-package with modules related to the supported univariate distributions.
None of the modules are exposed, they are used internally
by the UnivariateInput class.
"""
|
[
"\"\"\"\nSub-package with modules related to the supported univariate distributions.\n\nNone of the modules are exposed, they are used internally\nby the UnivariateInput class.\n\"\"\"\n",
"<docstring token>\n"
] | false |
99,672 |
31ec0ee6beb79f0fe7c6ad528717405c7e8a25a8
|
#
# @lc app=leetcode.cn id=15 lang=python3
#
# [15] 三数之和
# 双指针法:当我们需要枚举数组中的两个元素时,如果我们发现随着第一个元素的递增,第二个元素是递减的,那么就可以使用双指针的方法
# @lc code=start
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
nums.sort()
numsLen = len(nums)
ans = []
# 枚举 a
for first in range(numsLen):
if nums[first]>0:
return ans
# 跳过相邻重复元素!
if first>0 and nums[first]==nums[first-1]:
continue
#target: -nums[first] 是定值,在这个定值之上,使用双指针法,由于
#排序之后,L增大,R必然要减小
L = first+1
R = numsLen - 1
while(L<R):
if(nums[first]+nums[R]+nums[L]==0):
ans.append([nums[first],nums[L],nums[R]])
# 去除重复解
while(L<R and nums[L]==nums[L+1]):
L+=1
while(L<R and nums[R]==nums[R-1]):
R-=1
L+=1
R-=1
elif nums[first]+nums[L]+nums[R]>0:
R=R-1
else:
L=L+1
return ans
# @lc code=end
|
[
"#\n# @lc app=leetcode.cn id=15 lang=python3\n#\n# [15] 三数之和\n# 双指针法:当我们需要枚举数组中的两个元素时,如果我们发现随着第一个元素的递增,第二个元素是递减的,那么就可以使用双指针的方法\n\n# @lc code=start\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n numsLen = len(nums)\n ans = []\n # 枚举 a \n for first in range(numsLen):\n if nums[first]>0:\n return ans\n # 跳过相邻重复元素!\n if first>0 and nums[first]==nums[first-1]:\n continue\n #target: -nums[first] 是定值,在这个定值之上,使用双指针法,由于\n #排序之后,L增大,R必然要减小\n L = first+1\n R = numsLen - 1\n while(L<R):\n if(nums[first]+nums[R]+nums[L]==0):\n ans.append([nums[first],nums[L],nums[R]])\n # 去除重复解\n while(L<R and nums[L]==nums[L+1]):\n L+=1\n while(L<R and nums[R]==nums[R-1]):\n R-=1\n L+=1\n R-=1\n elif nums[first]+nums[L]+nums[R]>0:\n R=R-1\n else:\n L=L+1\n return ans\n\n\n# @lc code=end\n\n",
"class Solution:\n\n def threeSum(self, nums: List[int]) ->List[List[int]]:\n nums.sort()\n numsLen = len(nums)\n ans = []\n for first in range(numsLen):\n if nums[first] > 0:\n return ans\n if first > 0 and nums[first] == nums[first - 1]:\n continue\n L = first + 1\n R = numsLen - 1\n while L < R:\n if nums[first] + nums[R] + nums[L] == 0:\n ans.append([nums[first], nums[L], nums[R]])\n while L < R and nums[L] == nums[L + 1]:\n L += 1\n while L < R and nums[R] == nums[R - 1]:\n R -= 1\n L += 1\n R -= 1\n elif nums[first] + nums[L] + nums[R] > 0:\n R = R - 1\n else:\n L = L + 1\n return ans\n",
"class Solution:\n <function token>\n",
"<class token>\n"
] | false |
99,673 |
51b7ee45dd39aa4df98333b9a8fa967b8d261ae3
|
from django.urls import path
from . import view_common, view_user, views_inventory, view_customer, view_excel
app_name = "BPlan"
urlpatterns = [
path('test/', view_excel.test, name="test"),
# path('index/whetherLogin/', view_common.whether_login, name='whether_login'),
path('index/', view_common.index, name='index'),
# path('markdown/', view_common.markdown_html, name='markdown'),
path('login/', view_user.login_html, name='login'),
# path('login/loginCheck/', view_user.login_check, name='login_check'),
path('login/logout/', view_user.login_logout, name='login_logout'),
path('login/record/', view_user.login_record_html, name='login_record_html'),
# path('login/record/ask/', view_user.login_record_ask_html, name='login_record_ask_html'),
# path('register/', view_user.register_html, name='register_html'),
# path('register/check/', view_user.register_check, name='register_check'),
# path('change/password/', view_user.change_password_html, name='change_password_html'),
# path('change/password/check/', view_user.change_password_check, name='change_password_check'),
# path('change/question/', view_user.change_question_html, name='change_question_html'),
# path('change/question/check/', view_user.change_question_check, name='change_question_check'),
# path('change/personalInformation/', view_user.change_personal_information, name='change_personal_information'),
path('inventory/show/all/', views_inventory.inventory_show_all_html, name='inventory_show_all_html'),
path('inventory/show/detail/', views_inventory.inventory_show_detail_html, name='inventory_show_detail_html'),
path('inventory/create/', views_inventory.inventory_create, name='inventory_create_html'),
# path('inventory/create/add/', views_inventory.inventory_create_add, name='inventory_create_add'),
path('inventory/change/', views_inventory.inventory_change, name='inventory_change_html'),
# path('inventory/change/add/', views_inventory.inventory_change_add, name='inventory_change_add'),
path('inventory/change/detail/', views_inventory.inventory_change_detail, name='inventory_change_detail'),
path('inventory/search/', views_inventory.inventory_search, name='inventory_search'),
path('inventory/operation/', views_inventory.inventory_operation_html, name='inventory_operation_html'),
path('inventory/operation/chart/', views_inventory.inventory_operation_chart_html, name='inventory_operation_chart_html'),
path('inventory/group/create/', views_inventory.inventory_group_create, name='inventory_group_create_html'),
# path('inventory/group/create/add/', views_inventory.inventory_group_create_add, name='inventory_group_create_add'),
path('customer/show/all/', view_customer.customer_show_all_html, name='customer_show_all_html'),
path('customer/show/detail/', view_customer.customer_show_detail_html, name='customer_show_detail_html'),
path('customer/search/', view_customer.customer_search, name='customer_search'),
path('customer/create/', view_customer.customer_create_html, name='customer_create_html'),
# path('customer/create/add/', view_customer.customer_create_add, name='customer_create_add'),
path('customer/tracking/', view_customer.customer_tracking, name='customer_tracking'),
path('customer/change/detail/', view_customer.customer_change_detail, name='customer_change_detail'),
path('customer/excel/', view_customer.customer_excel, name='customer_excel'),
path('excel/inventory/', view_excel.inventory_excel, name='excel_inventory'),
path('excel/inventory_operation/', view_excel.inventory_operation_excel, name='excel_inventory_operation'),
path('excel/customer/', view_excel.customer_excel, name='excel_customer'), # 导出全部客户信息
path('excel/login_record', view_excel.login_record_excel, name='excel_login_record'),
# path('verificationCode/get/', VerificationCode.verification_code, name='getVerificationCode'),
# WXApi URL:
# path('wx/login/', WXApi.wx_login_check, name='wx_login_check'),
# path('wx/logout/', WXApi.wx_logout, name='wx_logout'),
]
|
[
"from django.urls import path\r\nfrom . import view_common, view_user, views_inventory, view_customer, view_excel\r\n\r\napp_name = \"BPlan\"\r\n\r\n\r\nurlpatterns = [\r\n path('test/', view_excel.test, name=\"test\"),\r\n\r\n # path('index/whetherLogin/', view_common.whether_login, name='whether_login'),\r\n path('index/', view_common.index, name='index'),\r\n # path('markdown/', view_common.markdown_html, name='markdown'),\r\n\r\n path('login/', view_user.login_html, name='login'),\r\n # path('login/loginCheck/', view_user.login_check, name='login_check'),\r\n path('login/logout/', view_user.login_logout, name='login_logout'),\r\n path('login/record/', view_user.login_record_html, name='login_record_html'),\r\n # path('login/record/ask/', view_user.login_record_ask_html, name='login_record_ask_html'),\r\n # path('register/', view_user.register_html, name='register_html'),\r\n # path('register/check/', view_user.register_check, name='register_check'),\r\n # path('change/password/', view_user.change_password_html, name='change_password_html'),\r\n # path('change/password/check/', view_user.change_password_check, name='change_password_check'),\r\n # path('change/question/', view_user.change_question_html, name='change_question_html'),\r\n # path('change/question/check/', view_user.change_question_check, name='change_question_check'),\r\n # path('change/personalInformation/', view_user.change_personal_information, name='change_personal_information'),\r\n\r\n path('inventory/show/all/', views_inventory.inventory_show_all_html, name='inventory_show_all_html'),\r\n path('inventory/show/detail/', views_inventory.inventory_show_detail_html, name='inventory_show_detail_html'),\r\n path('inventory/create/', views_inventory.inventory_create, name='inventory_create_html'),\r\n # path('inventory/create/add/', views_inventory.inventory_create_add, name='inventory_create_add'),\r\n path('inventory/change/', views_inventory.inventory_change, name='inventory_change_html'),\r\n # path('inventory/change/add/', views_inventory.inventory_change_add, name='inventory_change_add'),\r\n path('inventory/change/detail/', views_inventory.inventory_change_detail, name='inventory_change_detail'),\r\n path('inventory/search/', views_inventory.inventory_search, name='inventory_search'),\r\n path('inventory/operation/', views_inventory.inventory_operation_html, name='inventory_operation_html'),\r\n path('inventory/operation/chart/', views_inventory.inventory_operation_chart_html, name='inventory_operation_chart_html'),\r\n path('inventory/group/create/', views_inventory.inventory_group_create, name='inventory_group_create_html'),\r\n # path('inventory/group/create/add/', views_inventory.inventory_group_create_add, name='inventory_group_create_add'),\r\n\r\n path('customer/show/all/', view_customer.customer_show_all_html, name='customer_show_all_html'),\r\n path('customer/show/detail/', view_customer.customer_show_detail_html, name='customer_show_detail_html'),\r\n path('customer/search/', view_customer.customer_search, name='customer_search'),\r\n path('customer/create/', view_customer.customer_create_html, name='customer_create_html'),\r\n # path('customer/create/add/', view_customer.customer_create_add, name='customer_create_add'),\r\n path('customer/tracking/', view_customer.customer_tracking, name='customer_tracking'),\r\n path('customer/change/detail/', view_customer.customer_change_detail, name='customer_change_detail'),\r\n path('customer/excel/', view_customer.customer_excel, name='customer_excel'),\r\n\r\n path('excel/inventory/', view_excel.inventory_excel, name='excel_inventory'),\r\n path('excel/inventory_operation/', view_excel.inventory_operation_excel, name='excel_inventory_operation'),\r\n path('excel/customer/', view_excel.customer_excel, name='excel_customer'), # 导出全部客户信息\r\n path('excel/login_record', view_excel.login_record_excel, name='excel_login_record'),\r\n\r\n # path('verificationCode/get/', VerificationCode.verification_code, name='getVerificationCode'),\r\n\r\n # WXApi URL:\r\n # path('wx/login/', WXApi.wx_login_check, name='wx_login_check'),\r\n # path('wx/logout/', WXApi.wx_logout, name='wx_logout'),\r\n]\r\n",
"from django.urls import path\nfrom . import view_common, view_user, views_inventory, view_customer, view_excel\napp_name = 'BPlan'\nurlpatterns = [path('test/', view_excel.test, name='test'), path('index/',\n view_common.index, name='index'), path('login/', view_user.login_html,\n name='login'), path('login/logout/', view_user.login_logout, name=\n 'login_logout'), path('login/record/', view_user.login_record_html,\n name='login_record_html'), path('inventory/show/all/', views_inventory.\n inventory_show_all_html, name='inventory_show_all_html'), path(\n 'inventory/show/detail/', views_inventory.inventory_show_detail_html,\n name='inventory_show_detail_html'), path('inventory/create/',\n views_inventory.inventory_create, name='inventory_create_html'), path(\n 'inventory/change/', views_inventory.inventory_change, name=\n 'inventory_change_html'), path('inventory/change/detail/',\n views_inventory.inventory_change_detail, name='inventory_change_detail'\n ), path('inventory/search/', views_inventory.inventory_search, name=\n 'inventory_search'), path('inventory/operation/', views_inventory.\n inventory_operation_html, name='inventory_operation_html'), path(\n 'inventory/operation/chart/', views_inventory.\n inventory_operation_chart_html, name='inventory_operation_chart_html'),\n path('inventory/group/create/', views_inventory.inventory_group_create,\n name='inventory_group_create_html'), path('customer/show/all/',\n view_customer.customer_show_all_html, name='customer_show_all_html'),\n path('customer/show/detail/', view_customer.customer_show_detail_html,\n name='customer_show_detail_html'), path('customer/search/',\n view_customer.customer_search, name='customer_search'), path(\n 'customer/create/', view_customer.customer_create_html, name=\n 'customer_create_html'), path('customer/tracking/', view_customer.\n customer_tracking, name='customer_tracking'), path(\n 'customer/change/detail/', view_customer.customer_change_detail, name=\n 'customer_change_detail'), path('customer/excel/', view_customer.\n customer_excel, name='customer_excel'), path('excel/inventory/',\n view_excel.inventory_excel, name='excel_inventory'), path(\n 'excel/inventory_operation/', view_excel.inventory_operation_excel,\n name='excel_inventory_operation'), path('excel/customer/', view_excel.\n customer_excel, name='excel_customer'), path('excel/login_record',\n view_excel.login_record_excel, name='excel_login_record')]\n",
"<import token>\napp_name = 'BPlan'\nurlpatterns = [path('test/', view_excel.test, name='test'), path('index/',\n view_common.index, name='index'), path('login/', view_user.login_html,\n name='login'), path('login/logout/', view_user.login_logout, name=\n 'login_logout'), path('login/record/', view_user.login_record_html,\n name='login_record_html'), path('inventory/show/all/', views_inventory.\n inventory_show_all_html, name='inventory_show_all_html'), path(\n 'inventory/show/detail/', views_inventory.inventory_show_detail_html,\n name='inventory_show_detail_html'), path('inventory/create/',\n views_inventory.inventory_create, name='inventory_create_html'), path(\n 'inventory/change/', views_inventory.inventory_change, name=\n 'inventory_change_html'), path('inventory/change/detail/',\n views_inventory.inventory_change_detail, name='inventory_change_detail'\n ), path('inventory/search/', views_inventory.inventory_search, name=\n 'inventory_search'), path('inventory/operation/', views_inventory.\n inventory_operation_html, name='inventory_operation_html'), path(\n 'inventory/operation/chart/', views_inventory.\n inventory_operation_chart_html, name='inventory_operation_chart_html'),\n path('inventory/group/create/', views_inventory.inventory_group_create,\n name='inventory_group_create_html'), path('customer/show/all/',\n view_customer.customer_show_all_html, name='customer_show_all_html'),\n path('customer/show/detail/', view_customer.customer_show_detail_html,\n name='customer_show_detail_html'), path('customer/search/',\n view_customer.customer_search, name='customer_search'), path(\n 'customer/create/', view_customer.customer_create_html, name=\n 'customer_create_html'), path('customer/tracking/', view_customer.\n customer_tracking, name='customer_tracking'), path(\n 'customer/change/detail/', view_customer.customer_change_detail, name=\n 'customer_change_detail'), path('customer/excel/', view_customer.\n customer_excel, name='customer_excel'), path('excel/inventory/',\n view_excel.inventory_excel, name='excel_inventory'), path(\n 'excel/inventory_operation/', view_excel.inventory_operation_excel,\n name='excel_inventory_operation'), path('excel/customer/', view_excel.\n customer_excel, name='excel_customer'), path('excel/login_record',\n view_excel.login_record_excel, name='excel_login_record')]\n",
"<import token>\n<assignment token>\n"
] | false |
99,674 |
9b082a82354f15b8d523444ccc76472fe50b2341
|
from qiskit import IBMQ
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import Shor
IBMQ.enable_account('ENTER API TOKEN HERE') # Enter your API token here
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_qasm_simulator') # Specifies the quantum device
print('\n Shors Algorithm')
print('--------------------')
print('\nExecuting...\n')
factors = Shor(21) #Function to run Shor's algorithm where 21 is the integer to be factored
result_dict = factors.run(QuantumInstance(backend, shots=1, skip_qobj_validation=False))
result = result_dict['factors'] # Get factors from results
print(result)
print('\nPress any key to close')
input()
|
[
"from qiskit import IBMQ\nfrom qiskit.aqua import QuantumInstance\nfrom qiskit.aqua.algorithms import Shor\n\nIBMQ.enable_account('ENTER API TOKEN HERE') # Enter your API token here\nprovider = IBMQ.get_provider(hub='ibm-q')\n\nbackend = provider.get_backend('ibmq_qasm_simulator') # Specifies the quantum device\n\nprint('\\n Shors Algorithm')\nprint('--------------------')\nprint('\\nExecuting...\\n')\n\nfactors = Shor(21) #Function to run Shor's algorithm where 21 is the integer to be factored\n\nresult_dict = factors.run(QuantumInstance(backend, shots=1, skip_qobj_validation=False))\nresult = result_dict['factors'] # Get factors from results\n\nprint(result)\nprint('\\nPress any key to close')\ninput()",
"from qiskit import IBMQ\nfrom qiskit.aqua import QuantumInstance\nfrom qiskit.aqua.algorithms import Shor\nIBMQ.enable_account('ENTER API TOKEN HERE')\nprovider = IBMQ.get_provider(hub='ibm-q')\nbackend = provider.get_backend('ibmq_qasm_simulator')\nprint(\"\"\"\n Shors Algorithm\"\"\")\nprint('--------------------')\nprint('\\nExecuting...\\n')\nfactors = Shor(21)\nresult_dict = factors.run(QuantumInstance(backend, shots=1,\n skip_qobj_validation=False))\nresult = result_dict['factors']\nprint(result)\nprint(\"\"\"\nPress any key to close\"\"\")\ninput()\n",
"<import token>\nIBMQ.enable_account('ENTER API TOKEN HERE')\nprovider = IBMQ.get_provider(hub='ibm-q')\nbackend = provider.get_backend('ibmq_qasm_simulator')\nprint(\"\"\"\n Shors Algorithm\"\"\")\nprint('--------------------')\nprint('\\nExecuting...\\n')\nfactors = Shor(21)\nresult_dict = factors.run(QuantumInstance(backend, shots=1,\n skip_qobj_validation=False))\nresult = result_dict['factors']\nprint(result)\nprint(\"\"\"\nPress any key to close\"\"\")\ninput()\n",
"<import token>\nIBMQ.enable_account('ENTER API TOKEN HERE')\n<assignment token>\nprint(\"\"\"\n Shors Algorithm\"\"\")\nprint('--------------------')\nprint('\\nExecuting...\\n')\n<assignment token>\nprint(result)\nprint(\"\"\"\nPress any key to close\"\"\")\ninput()\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,675 |
6e6858040e8b05999c1d97648b76d46e85324ca9
|
"""
MooseGesture - A mouse gestures recognition library.
By Al Sweigart [email protected]
http://coffeeghost.net/2011/05/09/moosegesture-python-mouse-gestures-module
Usage:
import moosegesture
gesture = moosegesture.getGesture(points)
Where "points" is a list of x, y coordinate tuples, e.g. [(100, 200), (1234, 5678), ...]
getGesture returns a list of string for the recognized mouse gesture. The strings
correspond to the 8 cardinal and diagonal directions:
'UL' (up-left), 'U' (up), 'UR' (up-right)
'L' (left), 'R' (right)
'DL' (down-left), 'D' (down), 'DR' (down-right)
Second usage:
strokes = ['D', 'L', 'R']
gestures = [['D', 'L', 'D'], ['D', 'R', 'UR']]
gesture = moosegesture.findClosestMatchingGesture(strokes, gestures)
gesture == ['D', 'L', 'D']
Where "strokes" is a list of the directional integers that are returned from
getGesture(). This returns the closest resembling gesture from the list of
gestures that is passed to the function.
The optional "tolerance" parameter can ensure that the "closest" identified
gesture isn't too different.
Explanation of the nomenclature in this module:
A "point" is a 2D tuple of x, y values. These values can be ints or floats,
MooseGesture supports both.
A "point pair" is a point and its immediately subsequent point, i.e. two
points that are next to each other.
A "segment" is two or more ordered points forming a series of lines.
A "stroke" is a segment going in a single direction (one of the 8 cardinal or
diagonal directions: up, upright, left, etc.)
A "gesture" is one or more strokes in a specific pattern, e.g. up then right
then down then left.
"""
__version__ = '1.0.2'
import doctest
from math import sqrt
# This is the minimum distance the mouse must travel (in pixels) before a
# segment will be considered for stroke interpretation.
_MIN_STROKE_LEN = 60
DOWNLEFT = 'DL'
DOWN = 'D'
DOWNRIGHT = 'DR'
LEFT = 'L'
RIGHT = 'R'
UPLEFT = 'UL'
UP = 'U'
UPRIGHT = 'UR'
def getGesture(points):
"""
Returns a gesture as a list of directions, i.e. ['U', 'DL'] for
the down-left-right gesture.
The `points` parameter is a list of (x, y) tuples of points that make up
the user's mouse gesture.
"""
return _identifyStrokes(points)[0]
def getSegments(points):
"""
Returns a list of tuples of integers. The tuples are the start and end
indexes of the points that make up a consistent stroke.
"""
return _identifyStrokes(points)[1]
def getGestureAndSegments(points):
"""
Returns a list of tuples. The first item in the tuple is the directional
integer, and the second item is a tuple of integers for the start and end
indexes of the points that make up the stroke.
"""
strokes, strokeSegments = _identifyStrokes(points)
return list(zip(strokes, strokeSegments))
def findClosestMatchingGesture(strokes, gestureList, maxDifference=None):
"""
Returns the gesture(s) in `gestureList` that closest matches the gesture in
`strokes`. The `maxDifference` is how many differences there can be and still
be considered a match.
"""
if len(gestureList) == 0:
return None
#gestureList = [list(frozenset(tuple(gesture))) for gesture in gestureList] # make a unique list
gestureList = frozenset([tuple(gesture) for gesture in gestureList])
distances = {}
for g in gestureList:
levDist = levenshteinDistance(strokes, g)
if maxDifference is None or levDist <= maxDifference:
distances.setdefault(levDist, [])
distances[levDist].append(g)
if not distances:
return None # No matching gestures are within the tolerance of maxDifference.
return tuple(distances[min(distances.keys())])
def levenshteinDistance(s1, s2):
"""
Returns the Levenshtein Distance between two strings, `s1` and `s2` as an
integer.
http://en.wikipedia.org/wiki/Levenshtein_distance
The Levenshtein Distance (aka edit distance) is how many changes (i.e.
insertions, deletions, substitutions) have to be made to convert one
string into another.
For example, the Levenshtein distance between "kitten" and "sitting" is
3, since the following three edits change one into the other, and there
is no way to do it with fewer than three edits:
kitten -> sitten -> sittin -> sitting
"""
singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',
LEFT:'4', RIGHT:'6',
UPLEFT:'7', UP:'8', UPRIGHT:'9'}
len1 = len([singleLetterMapping[letter] for letter in s1])
len2 = len([singleLetterMapping[letter] for letter in s2])
matrix = list(range(len1 + 1)) * (len2 + 1)
for i in range(len2 + 1):
matrix[i] = list(range(i, i + len1 + 1))
for i in range(len2):
for j in range(len1):
if s1[j] == s2[i]:
matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])
else:
matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)
return matrix[len2][len1]
def _identifyStrokes(points):
strokes = []
strokeSegments = []
# calculate lengths between each sequential points
distances = []
for i in range(len(points)-1):
distances.append( _distance(points[i], points[i+1]) )
# keeps getting points until we go past the min. segment length
#startSegPoint = 0
#while startSegPoint < len(points)-1:
for startSegPoint in range(len(points)-1):
segmentDist = 0
curDir = None
consistent = True
direction = None
for curSegPoint in range(startSegPoint, len(points)-1):
segmentDist += distances[curSegPoint]
if segmentDist >= _MIN_STROKE_LEN:
# check if all points are going the same direction.
for i in range(startSegPoint, curSegPoint):
direction = _getDirection(points[i], points[i+1])
if curDir is None:
curDir = direction
elif direction != curDir:
consistent = False
break
break
if not consistent:
continue
elif (direction is not None and ( (not len(strokes)) or (len(strokes) and strokes[-1] != direction) )):
strokes.append(direction)
strokeSegments.append( [startSegPoint, curSegPoint] )
elif len(strokeSegments):
# update and lengthen the latest stroke since this stroke is being lengthened.
strokeSegments[-1][1] = curSegPoint
return strokes, strokeSegments
def _getDirection(coord1, coord2):
"""
Return the direction the line formed by the (x, y)
points in `coord1` and `coord2`.
"""
x1, y1 = coord1
x2, y2 = coord2
if x1 == x2 and y1 == y2:
return None # two coordinates are the same.
elif x1 == x2 and y1 > y2:
return UP
elif x1 == x2 and y1 < y2:
return DOWN
elif x1 > x2 and y1 == y2:
return LEFT
elif x1 < x2 and y1 == y2:
return RIGHT
slope = float(y2 - y1) / float(x2 - x1)
# Figure out which quadrant the line is going in, and then
# determine the closest direction by calculating the slope
if x2 > x1 and y2 < y1: # up right quadrant
if slope > -0.4142:
return RIGHT # slope is between 0 and 22.5 degrees
elif slope < -2.4142:
return UP # slope is between 67.5 and 90 degrees
else:
return UPRIGHT # slope is between 22.5 and 67.5 degrees
elif x2 > x1 and y2 > y1: # down right quadrant
if slope > 2.4142:
return DOWN
elif slope < 0.4142:
return RIGHT
else:
return DOWNRIGHT
elif x2 < x1 and y2 < y1: # up left quadrant
if slope < 0.4142:
return LEFT
elif slope > 2.4142:
return UP
else:
return UPLEFT
elif x2 < x1 and y2 > y1: # down left quadrant
if slope < -2.4142:
return DOWN
elif slope > -0.4142:
return LEFT
else:
return DOWNLEFT
def _distance(coord1, coord2):
"""
Return the distance between two points, `coord1` and `coord2`. These
parameters are assumed to be (x, y) tuples.
"""
xdist = coord1[0] - coord2[0]
ydist = coord1[1] - coord2[1]
return sqrt(xdist*xdist + ydist*ydist)
|
[
"\"\"\"\nMooseGesture - A mouse gestures recognition library.\nBy Al Sweigart [email protected]\nhttp://coffeeghost.net/2011/05/09/moosegesture-python-mouse-gestures-module\n\nUsage:\n import moosegesture\n gesture = moosegesture.getGesture(points)\n\nWhere \"points\" is a list of x, y coordinate tuples, e.g. [(100, 200), (1234, 5678), ...]\ngetGesture returns a list of string for the recognized mouse gesture. The strings\ncorrespond to the 8 cardinal and diagonal directions:\n\n 'UL' (up-left), 'U' (up), 'UR' (up-right)\n 'L' (left), 'R' (right)\n 'DL' (down-left), 'D' (down), 'DR' (down-right)\n\nSecond usage:\n strokes = ['D', 'L', 'R']\n gestures = [['D', 'L', 'D'], ['D', 'R', 'UR']]\n gesture = moosegesture.findClosestMatchingGesture(strokes, gestures)\n\n gesture == ['D', 'L', 'D']\n\nWhere \"strokes\" is a list of the directional integers that are returned from\ngetGesture(). This returns the closest resembling gesture from the list of\ngestures that is passed to the function.\n\nThe optional \"tolerance\" parameter can ensure that the \"closest\" identified\ngesture isn't too different.\n\n\nExplanation of the nomenclature in this module:\n A \"point\" is a 2D tuple of x, y values. These values can be ints or floats,\n MooseGesture supports both.\n\n A \"point pair\" is a point and its immediately subsequent point, i.e. two\n points that are next to each other.\n\n A \"segment\" is two or more ordered points forming a series of lines.\n\n A \"stroke\" is a segment going in a single direction (one of the 8 cardinal or\n diagonal directions: up, upright, left, etc.)\n\n A \"gesture\" is one or more strokes in a specific pattern, e.g. up then right\n then down then left.\n\n\n\"\"\"\n\n__version__ = '1.0.2'\n\nimport doctest\n\nfrom math import sqrt\n\n# This is the minimum distance the mouse must travel (in pixels) before a\n# segment will be considered for stroke interpretation.\n_MIN_STROKE_LEN = 60\n\nDOWNLEFT = 'DL'\nDOWN = 'D'\nDOWNRIGHT = 'DR'\nLEFT = 'L'\nRIGHT = 'R'\nUPLEFT = 'UL'\nUP = 'U'\nUPRIGHT = 'UR'\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\ndef getSegments(points):\n \"\"\"\n Returns a list of tuples of integers. The tuples are the start and end\n indexes of the points that make up a consistent stroke.\n \"\"\"\n return _identifyStrokes(points)[1]\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\ndef findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n \"\"\"\n Returns the gesture(s) in `gestureList` that closest matches the gesture in\n `strokes`. The `maxDifference` is how many differences there can be and still\n be considered a match.\n \"\"\"\n if len(gestureList) == 0:\n return None\n\n #gestureList = [list(frozenset(tuple(gesture))) for gesture in gestureList] # make a unique list\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n\n if not distances:\n return None # No matching gestures are within the tolerance of maxDifference.\n\n return tuple(distances[min(distances.keys())])\n\n\ndef levenshteinDistance(s1, s2):\n \"\"\"\n Returns the Levenshtein Distance between two strings, `s1` and `s2` as an\n integer.\n\n http://en.wikipedia.org/wiki/Levenshtein_distance\n The Levenshtein Distance (aka edit distance) is how many changes (i.e.\n insertions, deletions, substitutions) have to be made to convert one\n string into another.\n\n For example, the Levenshtein distance between \"kitten\" and \"sitting\" is\n 3, since the following three edits change one into the other, and there\n is no way to do it with fewer than three edits:\n kitten -> sitten -> sittin -> sitting\n \"\"\"\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]\n\n\ndef _identifyStrokes(points):\n strokes = []\n strokeSegments = []\n\n # calculate lengths between each sequential points\n distances = []\n for i in range(len(points)-1):\n distances.append( _distance(points[i], points[i+1]) )\n\n # keeps getting points until we go past the min. segment length\n #startSegPoint = 0\n #while startSegPoint < len(points)-1:\n for startSegPoint in range(len(points)-1):\n segmentDist = 0\n curDir = None\n consistent = True\n direction = None\n for curSegPoint in range(startSegPoint, len(points)-1):\n segmentDist += distances[curSegPoint]\n if segmentDist >= _MIN_STROKE_LEN:\n # check if all points are going the same direction.\n for i in range(startSegPoint, curSegPoint):\n direction = _getDirection(points[i], points[i+1])\n if curDir is None:\n curDir = direction\n elif direction != curDir:\n consistent = False\n break\n break\n if not consistent:\n continue\n elif (direction is not None and ( (not len(strokes)) or (len(strokes) and strokes[-1] != direction) )):\n strokes.append(direction)\n strokeSegments.append( [startSegPoint, curSegPoint] )\n elif len(strokeSegments):\n # update and lengthen the latest stroke since this stroke is being lengthened.\n strokeSegments[-1][1] = curSegPoint\n return strokes, strokeSegments\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n\n if x1 == x2 and y1 == y2:\n return None # two coordinates are the same.\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n\n slope = float(y2 - y1) / float(x2 - x1)\n\n # Figure out which quadrant the line is going in, and then\n # determine the closest direction by calculating the slope\n if x2 > x1 and y2 < y1: # up right quadrant\n if slope > -0.4142:\n return RIGHT # slope is between 0 and 22.5 degrees\n elif slope < -2.4142:\n return UP # slope is between 67.5 and 90 degrees\n else:\n return UPRIGHT # slope is between 22.5 and 67.5 degrees\n elif x2 > x1 and y2 > y1: # down right quadrant\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1: # up left quadrant\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1: # down left quadrant\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist*xdist + ydist*ydist)\n",
"<docstring token>\n__version__ = '1.0.2'\nimport doctest\nfrom math import sqrt\n_MIN_STROKE_LEN = 60\nDOWNLEFT = 'DL'\nDOWN = 'D'\nDOWNRIGHT = 'DR'\nLEFT = 'L'\nRIGHT = 'R'\nUPLEFT = 'UL'\nUP = 'U'\nUPRIGHT = 'UR'\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\ndef getSegments(points):\n \"\"\"\n Returns a list of tuples of integers. The tuples are the start and end\n indexes of the points that make up a consistent stroke.\n \"\"\"\n return _identifyStrokes(points)[1]\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\ndef findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n \"\"\"\n Returns the gesture(s) in `gestureList` that closest matches the gesture in\n `strokes`. The `maxDifference` is how many differences there can be and still\n be considered a match.\n \"\"\"\n if len(gestureList) == 0:\n return None\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n if not distances:\n return None\n return tuple(distances[min(distances.keys())])\n\n\ndef levenshteinDistance(s1, s2):\n \"\"\"\n Returns the Levenshtein Distance between two strings, `s1` and `s2` as an\n integer.\n\n http://en.wikipedia.org/wiki/Levenshtein_distance\n The Levenshtein Distance (aka edit distance) is how many changes (i.e.\n insertions, deletions, substitutions) have to be made to convert one\n string into another.\n\n For example, the Levenshtein distance between \"kitten\" and \"sitting\" is\n 3, since the following three edits change one into the other, and there\n is no way to do it with fewer than three edits:\n kitten -> sitten -> sittin -> sitting\n \"\"\"\n singleLetterMapping = {DOWNLEFT: '1', DOWN: '2', DOWNRIGHT: '3', LEFT:\n '4', RIGHT: '6', UPLEFT: '7', UP: '8', UPRIGHT: '9'}\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j])\n else:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]\n\n\ndef _identifyStrokes(points):\n strokes = []\n strokeSegments = []\n distances = []\n for i in range(len(points) - 1):\n distances.append(_distance(points[i], points[i + 1]))\n for startSegPoint in range(len(points) - 1):\n segmentDist = 0\n curDir = None\n consistent = True\n direction = None\n for curSegPoint in range(startSegPoint, len(points) - 1):\n segmentDist += distances[curSegPoint]\n if segmentDist >= _MIN_STROKE_LEN:\n for i in range(startSegPoint, curSegPoint):\n direction = _getDirection(points[i], points[i + 1])\n if curDir is None:\n curDir = direction\n elif direction != curDir:\n consistent = False\n break\n break\n if not consistent:\n continue\n elif direction is not None and (not len(strokes) or len(strokes) and\n strokes[-1] != direction):\n strokes.append(direction)\n strokeSegments.append([startSegPoint, curSegPoint])\n elif len(strokeSegments):\n strokeSegments[-1][1] = curSegPoint\n return strokes, strokeSegments\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist * xdist + ydist * ydist)\n",
"<docstring token>\n__version__ = '1.0.2'\n<import token>\n_MIN_STROKE_LEN = 60\nDOWNLEFT = 'DL'\nDOWN = 'D'\nDOWNRIGHT = 'DR'\nLEFT = 'L'\nRIGHT = 'R'\nUPLEFT = 'UL'\nUP = 'U'\nUPRIGHT = 'UR'\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\ndef getSegments(points):\n \"\"\"\n Returns a list of tuples of integers. The tuples are the start and end\n indexes of the points that make up a consistent stroke.\n \"\"\"\n return _identifyStrokes(points)[1]\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\ndef findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n \"\"\"\n Returns the gesture(s) in `gestureList` that closest matches the gesture in\n `strokes`. The `maxDifference` is how many differences there can be and still\n be considered a match.\n \"\"\"\n if len(gestureList) == 0:\n return None\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n if not distances:\n return None\n return tuple(distances[min(distances.keys())])\n\n\ndef levenshteinDistance(s1, s2):\n \"\"\"\n Returns the Levenshtein Distance between two strings, `s1` and `s2` as an\n integer.\n\n http://en.wikipedia.org/wiki/Levenshtein_distance\n The Levenshtein Distance (aka edit distance) is how many changes (i.e.\n insertions, deletions, substitutions) have to be made to convert one\n string into another.\n\n For example, the Levenshtein distance between \"kitten\" and \"sitting\" is\n 3, since the following three edits change one into the other, and there\n is no way to do it with fewer than three edits:\n kitten -> sitten -> sittin -> sitting\n \"\"\"\n singleLetterMapping = {DOWNLEFT: '1', DOWN: '2', DOWNRIGHT: '3', LEFT:\n '4', RIGHT: '6', UPLEFT: '7', UP: '8', UPRIGHT: '9'}\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j])\n else:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]\n\n\ndef _identifyStrokes(points):\n strokes = []\n strokeSegments = []\n distances = []\n for i in range(len(points) - 1):\n distances.append(_distance(points[i], points[i + 1]))\n for startSegPoint in range(len(points) - 1):\n segmentDist = 0\n curDir = None\n consistent = True\n direction = None\n for curSegPoint in range(startSegPoint, len(points) - 1):\n segmentDist += distances[curSegPoint]\n if segmentDist >= _MIN_STROKE_LEN:\n for i in range(startSegPoint, curSegPoint):\n direction = _getDirection(points[i], points[i + 1])\n if curDir is None:\n curDir = direction\n elif direction != curDir:\n consistent = False\n break\n break\n if not consistent:\n continue\n elif direction is not None and (not len(strokes) or len(strokes) and\n strokes[-1] != direction):\n strokes.append(direction)\n strokeSegments.append([startSegPoint, curSegPoint])\n elif len(strokeSegments):\n strokeSegments[-1][1] = curSegPoint\n return strokes, strokeSegments\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist * xdist + ydist * ydist)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\ndef getSegments(points):\n \"\"\"\n Returns a list of tuples of integers. The tuples are the start and end\n indexes of the points that make up a consistent stroke.\n \"\"\"\n return _identifyStrokes(points)[1]\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\ndef findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n \"\"\"\n Returns the gesture(s) in `gestureList` that closest matches the gesture in\n `strokes`. The `maxDifference` is how many differences there can be and still\n be considered a match.\n \"\"\"\n if len(gestureList) == 0:\n return None\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n if not distances:\n return None\n return tuple(distances[min(distances.keys())])\n\n\ndef levenshteinDistance(s1, s2):\n \"\"\"\n Returns the Levenshtein Distance between two strings, `s1` and `s2` as an\n integer.\n\n http://en.wikipedia.org/wiki/Levenshtein_distance\n The Levenshtein Distance (aka edit distance) is how many changes (i.e.\n insertions, deletions, substitutions) have to be made to convert one\n string into another.\n\n For example, the Levenshtein distance between \"kitten\" and \"sitting\" is\n 3, since the following three edits change one into the other, and there\n is no way to do it with fewer than three edits:\n kitten -> sitten -> sittin -> sitting\n \"\"\"\n singleLetterMapping = {DOWNLEFT: '1', DOWN: '2', DOWNRIGHT: '3', LEFT:\n '4', RIGHT: '6', UPLEFT: '7', UP: '8', UPRIGHT: '9'}\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j])\n else:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]\n\n\ndef _identifyStrokes(points):\n strokes = []\n strokeSegments = []\n distances = []\n for i in range(len(points) - 1):\n distances.append(_distance(points[i], points[i + 1]))\n for startSegPoint in range(len(points) - 1):\n segmentDist = 0\n curDir = None\n consistent = True\n direction = None\n for curSegPoint in range(startSegPoint, len(points) - 1):\n segmentDist += distances[curSegPoint]\n if segmentDist >= _MIN_STROKE_LEN:\n for i in range(startSegPoint, curSegPoint):\n direction = _getDirection(points[i], points[i + 1])\n if curDir is None:\n curDir = direction\n elif direction != curDir:\n consistent = False\n break\n break\n if not consistent:\n continue\n elif direction is not None and (not len(strokes) or len(strokes) and\n strokes[-1] != direction):\n strokes.append(direction)\n strokeSegments.append([startSegPoint, curSegPoint])\n elif len(strokeSegments):\n strokeSegments[-1][1] = curSegPoint\n return strokes, strokeSegments\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist * xdist + ydist * ydist)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\ndef getSegments(points):\n \"\"\"\n Returns a list of tuples of integers. The tuples are the start and end\n indexes of the points that make up a consistent stroke.\n \"\"\"\n return _identifyStrokes(points)[1]\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\ndef findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n \"\"\"\n Returns the gesture(s) in `gestureList` that closest matches the gesture in\n `strokes`. The `maxDifference` is how many differences there can be and still\n be considered a match.\n \"\"\"\n if len(gestureList) == 0:\n return None\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n if not distances:\n return None\n return tuple(distances[min(distances.keys())])\n\n\ndef levenshteinDistance(s1, s2):\n \"\"\"\n Returns the Levenshtein Distance between two strings, `s1` and `s2` as an\n integer.\n\n http://en.wikipedia.org/wiki/Levenshtein_distance\n The Levenshtein Distance (aka edit distance) is how many changes (i.e.\n insertions, deletions, substitutions) have to be made to convert one\n string into another.\n\n For example, the Levenshtein distance between \"kitten\" and \"sitting\" is\n 3, since the following three edits change one into the other, and there\n is no way to do it with fewer than three edits:\n kitten -> sitten -> sittin -> sitting\n \"\"\"\n singleLetterMapping = {DOWNLEFT: '1', DOWN: '2', DOWNRIGHT: '3', LEFT:\n '4', RIGHT: '6', UPLEFT: '7', UP: '8', UPRIGHT: '9'}\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j])\n else:\n matrix[i + 1][j + 1] = min(matrix[i + 1][j] + 1, matrix[i][\n j + 1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]\n\n\n<function token>\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist * xdist + ydist * ydist)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\ndef getSegments(points):\n \"\"\"\n Returns a list of tuples of integers. The tuples are the start and end\n indexes of the points that make up a consistent stroke.\n \"\"\"\n return _identifyStrokes(points)[1]\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\ndef findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n \"\"\"\n Returns the gesture(s) in `gestureList` that closest matches the gesture in\n `strokes`. The `maxDifference` is how many differences there can be and still\n be considered a match.\n \"\"\"\n if len(gestureList) == 0:\n return None\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n if not distances:\n return None\n return tuple(distances[min(distances.keys())])\n\n\n<function token>\n<function token>\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist * xdist + ydist * ydist)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\n<function token>\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\ndef findClosestMatchingGesture(strokes, gestureList, maxDifference=None):\n \"\"\"\n Returns the gesture(s) in `gestureList` that closest matches the gesture in\n `strokes`. The `maxDifference` is how many differences there can be and still\n be considered a match.\n \"\"\"\n if len(gestureList) == 0:\n return None\n gestureList = frozenset([tuple(gesture) for gesture in gestureList])\n distances = {}\n for g in gestureList:\n levDist = levenshteinDistance(strokes, g)\n if maxDifference is None or levDist <= maxDifference:\n distances.setdefault(levDist, [])\n distances[levDist].append(g)\n if not distances:\n return None\n return tuple(distances[min(distances.keys())])\n\n\n<function token>\n<function token>\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist * xdist + ydist * ydist)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\n<function token>\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\ndef _distance(coord1, coord2):\n \"\"\"\n Return the distance between two points, `coord1` and `coord2`. These\n parameters are assumed to be (x, y) tuples.\n \"\"\"\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist * xdist + ydist * ydist)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\n<function token>\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _getDirection(coord1, coord2):\n \"\"\"\n Return the direction the line formed by the (x, y)\n points in `coord1` and `coord2`.\n \"\"\"\n x1, y1 = coord1\n x2, y2 = coord2\n if x1 == x2 and y1 == y2:\n return None\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n slope = float(y2 - y1) / float(x2 - x1)\n if x2 > x1 and y2 < y1:\n if slope > -0.4142:\n return RIGHT\n elif slope < -2.4142:\n return UP\n else:\n return UPRIGHT\n elif x2 > x1 and y2 > y1:\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1:\n if slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1:\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT\n\n\n<function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\n<function token>\n\n\ndef getGestureAndSegments(points):\n \"\"\"\n Returns a list of tuples. The first item in the tuple is the directional\n integer, and the second item is a tuple of integers for the start and end\n indexes of the points that make up the stroke.\n \"\"\"\n strokes, strokeSegments = _identifyStrokes(points)\n return list(zip(strokes, strokeSegments))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef getGesture(points):\n \"\"\"\n Returns a gesture as a list of directions, i.e. ['U', 'DL'] for\n the down-left-right gesture.\n\n The `points` parameter is a list of (x, y) tuples of points that make up\n the user's mouse gesture.\n \"\"\"\n return _identifyStrokes(points)[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,676 |
97a4e8c3d26d847e90b8e82eac73e2856f73cef8
|
# -*- coding: utf-8 -*-
import rclpy
from rclpy.node import Node
from fgs_data_generator.generate import generate as factory
def generate(args=None):
rclpy.init(args=args)
# Node name should be declared
node = Node('generate')
# Even if you pass parameters via command line by using yaml file,
# should declare parameter name
node.declare_parameter(name='config_path', value='')
config_path = node.get_parameter('config_path').value
try:
factory(config_path)
except Exception as e:
# In python3 Exception has no attribute 'message'
print(e)
exit(-1)
exit(0)
|
[
"# -*- coding: utf-8 -*-\nimport rclpy\nfrom rclpy.node import Node\n\nfrom fgs_data_generator.generate import generate as factory\n\ndef generate(args=None):\n rclpy.init(args=args)\n\n # Node name should be declared\n node = Node('generate')\n\n # Even if you pass parameters via command line by using yaml file,\n # should declare parameter name\n node.declare_parameter(name='config_path', value='')\n config_path = node.get_parameter('config_path').value\n\n try:\n factory(config_path)\n except Exception as e:\n # In python3 Exception has no attribute 'message'\n print(e)\n exit(-1)\n\n exit(0)\n",
"import rclpy\nfrom rclpy.node import Node\nfrom fgs_data_generator.generate import generate as factory\n\n\ndef generate(args=None):\n rclpy.init(args=args)\n node = Node('generate')\n node.declare_parameter(name='config_path', value='')\n config_path = node.get_parameter('config_path').value\n try:\n factory(config_path)\n except Exception as e:\n print(e)\n exit(-1)\n exit(0)\n",
"<import token>\n\n\ndef generate(args=None):\n rclpy.init(args=args)\n node = Node('generate')\n node.declare_parameter(name='config_path', value='')\n config_path = node.get_parameter('config_path').value\n try:\n factory(config_path)\n except Exception as e:\n print(e)\n exit(-1)\n exit(0)\n",
"<import token>\n<function token>\n"
] | false |
99,677 |
f3904e633ebc0dd381efd27345fdb0d87a390670
|
import pymc3 as pm
import numpy as np
from typing import Dict
import matplotlib.pyplot as plt
def _debug(*args, **kwargs):
pass
# print(*args, file=sys.stderr, **kwargs)
class RingBuff(pm.backends.ndarray.NDArray):
"""NDArray trace object
Parameters
----------
name : str
Name of backend. This has no meaning for the NDArray backend.
model : Model
If None, the model is taken from the `with` context.
keep_n : int
Number of samples to keep.
vars : list of variables
Sampling values will be stored for these variables. If None,
`model.unobserved_RVs` is used.
"""
supports_sampler_stats = True
def __init__(self, name=None, model=None, vars=None, keep_n=10,
test_point=None):
super().__init__(name, model, vars, test_point)
self.keep_n = keep_n
def setup(self, draws, chain, sampler_vars=None) -> None:
"""Perform chain-specific setup.
Parameters
----------
draws : int
Expected number of draws
chain : int
Chain number
sampler_vars : list of dicts
Names and dtypes of the variables that are
exported by the samplers.
"""
_debug(f"Setting up ring buffer backend of size {self.keep_n}.")
super(pm.backends.ndarray.NDArray, self).setup(draws, chain, sampler_vars)
self.chain = chain
_debug(f"I am chain {chain}.")
if self.samples: # Concatenate new array if chain is already present.
_debug("Concatenating old samples.")
old_draws = len(self)
self.draws = old_draws + draws
self.draw_idx = old_draws
for varname, shape in self.var_shapes.items():
old_var_samples = self.samples[varname]
_debug(f"Initializing container for {varname} of shape {shape} which has old samples.")
new_var_samples = np.empty((max(0, self.keep_n - old_draws),)
+ shape,
self.var_dtypes[varname])
_debug(f"Concatenating old samples to {varname}.")
self.samples[varname] = np.concatenate((old_var_samples,
new_var_samples),
axis=0)
_debug(f"Finished concatenating old samples for {varname}.")
else: # Otherwise, make empty arrays for each variable.
self.draws = draws
for varname, shape in self.var_shapes.items():
_debug(f"Initializing container for {varname} of shape {shape}")
self.samples[varname] = \
np.empty((self.keep_n, ) + shape,
dtype=self.var_dtypes[varname])
if sampler_vars is None:
return
if self._stats is None:
self._stats = []
for sampler in sampler_vars:
data = dict() # type: Dict[str, np.ndarray]
self._stats.append(data)
for varname, dtype in sampler.items():
data[varname] = np.empty(draws, dtype=dtype)
else:
for data, vars in zip(self._stats, sampler_vars):
if vars.keys() != data.keys():
raise ValueError("Sampler vars can't change")
old_draws = len(self)
for varname, dtype in vars.items():
old = data[varname]
new = np.empty(draws, dtype=dtype)
data[varname] = np.concatenate([old, new])
def record(self, point, sampler_stats=None) -> None:
"""Record results of a sampling iteration.
Parameters
----------
point : dict
Values mapped to variable names
"""
for varname, value in zip(self.varnames, self.fn(point)):
self.samples[varname][self.draw_idx % self.keep_n] = value
if self._stats is not None and sampler_stats is None:
raise ValueError("Expected sampler_stats")
if self._stats is None and sampler_stats is not None:
raise ValueError("Unknown sampler_stats")
if sampler_stats is not None:
for data, vars in zip(self._stats, sampler_stats):
for key, val in vars.items():
data[key][self.draw_idx % self.keep_n] = val
self.draw_idx += 1
def close(self):
if self.draw_idx == self.draws:
return
elif self.draw_idx < self.keep_n:
# Remove trailing zeros if interrupted before completing enough
# draws.
self.samples = {var: vtrace[:self.draw_idx]
for var, vtrace in self.samples.items()}
else:
# Rearrange the trace given the pointer location.
self.samples = \
{var: np.concatenate([vtrace[self.draw_idx % self.keep_n:],
vtrace[:self.draw_idx % self.keep_n]
],
axis=0)
for var, vtrace in self.samples.items()}
if self._stats is not None:
self._stats = [
{var: trace[:self.draw_idx] for var, trace in stats.items()}
for stats in self._stats]
def __len__(self):
if not self.samples: # `setup` has not been called.
return 0
return min(self.draw_idx, self.keep_n)
def split_sampler_traces(trace, statname):
nsteps = len(trace)
nchains = len(trace.chains)
varshape = trace[statname][0].shape
out = np.empty((nchains, nsteps, *varshape))
for i in trace.chains:
chain_start = i * nsteps
chain_stop = (i + 1) * nsteps
out[i] = trace[statname][chain_start:chain_stop]
return out
def trace_stat_plot(trace, statname,
savepath=None, exclude=[], skip_frac=0.2):
nsteps = len(trace)
skip = int(np.floor(nsteps * skip_frac))
fig, ax = plt.subplots()
stat = getattr(trace, statname)
for i in trace.chains:
if i in exclude:
continue
chain_start = i*nsteps
chain_stop = i*nsteps + nsteps
ax.plot(range(skip, nsteps),
stat[chain_start:chain_stop][skip:], label=i)
ax.legend(bbox_to_anchor=(1, 1))
ax.set_title(statname)
if savepath is not None:
fig.savefig(savepath)
|
[
"import pymc3 as pm\nimport numpy as np\nfrom typing import Dict\nimport matplotlib.pyplot as plt\n\n\ndef _debug(*args, **kwargs):\n pass\n # print(*args, file=sys.stderr, **kwargs)\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n \"\"\"NDArray trace object\n\n Parameters\n ----------\n name : str\n Name of backend. This has no meaning for the NDArray backend.\n model : Model\n If None, the model is taken from the `with` context.\n keep_n : int\n Number of samples to keep.\n vars : list of variables\n Sampling values will be stored for these variables. If None,\n `model.unobserved_RVs` is used.\n \"\"\"\n\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) -> None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f\"Setting up ring buffer backend of size {self.keep_n}.\")\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain, sampler_vars)\n self.chain = chain\n _debug(f\"I am chain {chain}.\")\n if self.samples: # Concatenate new array if chain is already present.\n _debug(\"Concatenating old samples.\")\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(f\"Initializing container for {varname} of shape {shape} which has old samples.\")\n new_var_samples = np.empty((max(0, self.keep_n - old_draws),)\n + shape,\n self.var_dtypes[varname])\n _debug(f\"Concatenating old samples to {varname}.\")\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples),\n axis=0)\n _debug(f\"Finished concatenating old samples for {varname}.\")\n else: # Otherwise, make empty arrays for each variable.\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f\"Initializing container for {varname} of shape {shape}\")\n self.samples[varname] = \\\n np.empty((self.keep_n, ) + shape,\n dtype=self.var_dtypes[varname])\n\n if sampler_vars is None:\n return\n\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict() # type: Dict[str, np.ndarray]\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) -> None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n\n if self._stats is not None and sampler_stats is None:\n raise ValueError(\"Expected sampler_stats\")\n if self._stats is None and sampler_stats is not None:\n raise ValueError(\"Unknown sampler_stats\")\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n # Remove trailing zeros if interrupted before completing enough\n # draws.\n self.samples = {var: vtrace[:self.draw_idx]\n for var, vtrace in self.samples.items()}\n else:\n # Rearrange the trace given the pointer location.\n self.samples = \\\n {var: np.concatenate([vtrace[self.draw_idx % self.keep_n:],\n vtrace[:self.draw_idx % self.keep_n]\n ],\n axis=0)\n for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [\n {var: trace[:self.draw_idx] for var, trace in stats.items()}\n for stats in self._stats]\n\n def __len__(self):\n if not self.samples: # `setup` has not been called.\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\ndef split_sampler_traces(trace, statname):\n nsteps = len(trace)\n nchains = len(trace.chains)\n varshape = trace[statname][0].shape\n out = np.empty((nchains, nsteps, *varshape))\n for i in trace.chains:\n chain_start = i * nsteps\n chain_stop = (i + 1) * nsteps\n out[i] = trace[statname][chain_start:chain_stop]\n return out\n\n\ndef trace_stat_plot(trace, statname,\n savepath=None, exclude=[], skip_frac=0.2):\n nsteps = len(trace)\n skip = int(np.floor(nsteps * skip_frac))\n fig, ax = plt.subplots()\n stat = getattr(trace, statname)\n for i in trace.chains:\n if i in exclude:\n continue\n chain_start = i*nsteps\n chain_stop = i*nsteps + nsteps\n ax.plot(range(skip, nsteps),\n stat[chain_start:chain_stop][skip:], label=i)\n ax.legend(bbox_to_anchor=(1, 1))\n ax.set_title(statname)\n if savepath is not None:\n fig.savefig(savepath)\n",
"import pymc3 as pm\nimport numpy as np\nfrom typing import Dict\nimport matplotlib.pyplot as plt\n\n\ndef _debug(*args, **kwargs):\n pass\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n \"\"\"NDArray trace object\n\n Parameters\n ----------\n name : str\n Name of backend. This has no meaning for the NDArray backend.\n model : Model\n If None, the model is taken from the `with` context.\n keep_n : int\n Number of samples to keep.\n vars : list of variables\n Sampling values will be stored for these variables. If None,\n `model.unobserved_RVs` is used.\n \"\"\"\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) ->None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f'Setting up ring buffer backend of size {self.keep_n}.')\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain,\n sampler_vars)\n self.chain = chain\n _debug(f'I am chain {chain}.')\n if self.samples:\n _debug('Concatenating old samples.')\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(\n f'Initializing container for {varname} of shape {shape} which has old samples.'\n )\n new_var_samples = np.empty((max(0, self.keep_n - old_draws)\n ,) + shape, self.var_dtypes[varname])\n _debug(f'Concatenating old samples to {varname}.')\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples), axis=0)\n _debug(f'Finished concatenating old samples for {varname}.')\n else:\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f'Initializing container for {varname} of shape {shape}'\n )\n self.samples[varname] = np.empty((self.keep_n,) + shape,\n dtype=self.var_dtypes[varname])\n if sampler_vars is None:\n return\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict()\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\ndef split_sampler_traces(trace, statname):\n nsteps = len(trace)\n nchains = len(trace.chains)\n varshape = trace[statname][0].shape\n out = np.empty((nchains, nsteps, *varshape))\n for i in trace.chains:\n chain_start = i * nsteps\n chain_stop = (i + 1) * nsteps\n out[i] = trace[statname][chain_start:chain_stop]\n return out\n\n\ndef trace_stat_plot(trace, statname, savepath=None, exclude=[], skip_frac=0.2):\n nsteps = len(trace)\n skip = int(np.floor(nsteps * skip_frac))\n fig, ax = plt.subplots()\n stat = getattr(trace, statname)\n for i in trace.chains:\n if i in exclude:\n continue\n chain_start = i * nsteps\n chain_stop = i * nsteps + nsteps\n ax.plot(range(skip, nsteps), stat[chain_start:chain_stop][skip:],\n label=i)\n ax.legend(bbox_to_anchor=(1, 1))\n ax.set_title(statname)\n if savepath is not None:\n fig.savefig(savepath)\n",
"<import token>\n\n\ndef _debug(*args, **kwargs):\n pass\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n \"\"\"NDArray trace object\n\n Parameters\n ----------\n name : str\n Name of backend. This has no meaning for the NDArray backend.\n model : Model\n If None, the model is taken from the `with` context.\n keep_n : int\n Number of samples to keep.\n vars : list of variables\n Sampling values will be stored for these variables. If None,\n `model.unobserved_RVs` is used.\n \"\"\"\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) ->None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f'Setting up ring buffer backend of size {self.keep_n}.')\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain,\n sampler_vars)\n self.chain = chain\n _debug(f'I am chain {chain}.')\n if self.samples:\n _debug('Concatenating old samples.')\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(\n f'Initializing container for {varname} of shape {shape} which has old samples.'\n )\n new_var_samples = np.empty((max(0, self.keep_n - old_draws)\n ,) + shape, self.var_dtypes[varname])\n _debug(f'Concatenating old samples to {varname}.')\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples), axis=0)\n _debug(f'Finished concatenating old samples for {varname}.')\n else:\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f'Initializing container for {varname} of shape {shape}'\n )\n self.samples[varname] = np.empty((self.keep_n,) + shape,\n dtype=self.var_dtypes[varname])\n if sampler_vars is None:\n return\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict()\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\ndef split_sampler_traces(trace, statname):\n nsteps = len(trace)\n nchains = len(trace.chains)\n varshape = trace[statname][0].shape\n out = np.empty((nchains, nsteps, *varshape))\n for i in trace.chains:\n chain_start = i * nsteps\n chain_stop = (i + 1) * nsteps\n out[i] = trace[statname][chain_start:chain_stop]\n return out\n\n\ndef trace_stat_plot(trace, statname, savepath=None, exclude=[], skip_frac=0.2):\n nsteps = len(trace)\n skip = int(np.floor(nsteps * skip_frac))\n fig, ax = plt.subplots()\n stat = getattr(trace, statname)\n for i in trace.chains:\n if i in exclude:\n continue\n chain_start = i * nsteps\n chain_stop = i * nsteps + nsteps\n ax.plot(range(skip, nsteps), stat[chain_start:chain_stop][skip:],\n label=i)\n ax.legend(bbox_to_anchor=(1, 1))\n ax.set_title(statname)\n if savepath is not None:\n fig.savefig(savepath)\n",
"<import token>\n\n\ndef _debug(*args, **kwargs):\n pass\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n \"\"\"NDArray trace object\n\n Parameters\n ----------\n name : str\n Name of backend. This has no meaning for the NDArray backend.\n model : Model\n If None, the model is taken from the `with` context.\n keep_n : int\n Number of samples to keep.\n vars : list of variables\n Sampling values will be stored for these variables. If None,\n `model.unobserved_RVs` is used.\n \"\"\"\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) ->None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f'Setting up ring buffer backend of size {self.keep_n}.')\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain,\n sampler_vars)\n self.chain = chain\n _debug(f'I am chain {chain}.')\n if self.samples:\n _debug('Concatenating old samples.')\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(\n f'Initializing container for {varname} of shape {shape} which has old samples.'\n )\n new_var_samples = np.empty((max(0, self.keep_n - old_draws)\n ,) + shape, self.var_dtypes[varname])\n _debug(f'Concatenating old samples to {varname}.')\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples), axis=0)\n _debug(f'Finished concatenating old samples for {varname}.')\n else:\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f'Initializing container for {varname} of shape {shape}'\n )\n self.samples[varname] = np.empty((self.keep_n,) + shape,\n dtype=self.var_dtypes[varname])\n if sampler_vars is None:\n return\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict()\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\ndef split_sampler_traces(trace, statname):\n nsteps = len(trace)\n nchains = len(trace.chains)\n varshape = trace[statname][0].shape\n out = np.empty((nchains, nsteps, *varshape))\n for i in trace.chains:\n chain_start = i * nsteps\n chain_stop = (i + 1) * nsteps\n out[i] = trace[statname][chain_start:chain_stop]\n return out\n\n\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n \"\"\"NDArray trace object\n\n Parameters\n ----------\n name : str\n Name of backend. This has no meaning for the NDArray backend.\n model : Model\n If None, the model is taken from the `with` context.\n keep_n : int\n Number of samples to keep.\n vars : list of variables\n Sampling values will be stored for these variables. If None,\n `model.unobserved_RVs` is used.\n \"\"\"\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) ->None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f'Setting up ring buffer backend of size {self.keep_n}.')\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain,\n sampler_vars)\n self.chain = chain\n _debug(f'I am chain {chain}.')\n if self.samples:\n _debug('Concatenating old samples.')\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(\n f'Initializing container for {varname} of shape {shape} which has old samples.'\n )\n new_var_samples = np.empty((max(0, self.keep_n - old_draws)\n ,) + shape, self.var_dtypes[varname])\n _debug(f'Concatenating old samples to {varname}.')\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples), axis=0)\n _debug(f'Finished concatenating old samples for {varname}.')\n else:\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f'Initializing container for {varname} of shape {shape}'\n )\n self.samples[varname] = np.empty((self.keep_n,) + shape,\n dtype=self.var_dtypes[varname])\n if sampler_vars is None:\n return\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict()\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\ndef split_sampler_traces(trace, statname):\n nsteps = len(trace)\n nchains = len(trace.chains)\n varshape = trace[statname][0].shape\n out = np.empty((nchains, nsteps, *varshape))\n for i in trace.chains:\n chain_start = i * nsteps\n chain_stop = (i + 1) * nsteps\n out[i] = trace[statname][chain_start:chain_stop]\n return out\n\n\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n \"\"\"NDArray trace object\n\n Parameters\n ----------\n name : str\n Name of backend. This has no meaning for the NDArray backend.\n model : Model\n If None, the model is taken from the `with` context.\n keep_n : int\n Number of samples to keep.\n vars : list of variables\n Sampling values will be stored for these variables. If None,\n `model.unobserved_RVs` is used.\n \"\"\"\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) ->None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f'Setting up ring buffer backend of size {self.keep_n}.')\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain,\n sampler_vars)\n self.chain = chain\n _debug(f'I am chain {chain}.')\n if self.samples:\n _debug('Concatenating old samples.')\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(\n f'Initializing container for {varname} of shape {shape} which has old samples.'\n )\n new_var_samples = np.empty((max(0, self.keep_n - old_draws)\n ,) + shape, self.var_dtypes[varname])\n _debug(f'Concatenating old samples to {varname}.')\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples), axis=0)\n _debug(f'Finished concatenating old samples for {varname}.')\n else:\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f'Initializing container for {varname} of shape {shape}'\n )\n self.samples[varname] = np.empty((self.keep_n,) + shape,\n dtype=self.var_dtypes[varname])\n if sampler_vars is None:\n return\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict()\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n <docstring token>\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) ->None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f'Setting up ring buffer backend of size {self.keep_n}.')\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain,\n sampler_vars)\n self.chain = chain\n _debug(f'I am chain {chain}.')\n if self.samples:\n _debug('Concatenating old samples.')\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(\n f'Initializing container for {varname} of shape {shape} which has old samples.'\n )\n new_var_samples = np.empty((max(0, self.keep_n - old_draws)\n ,) + shape, self.var_dtypes[varname])\n _debug(f'Concatenating old samples to {varname}.')\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples), axis=0)\n _debug(f'Finished concatenating old samples for {varname}.')\n else:\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f'Initializing container for {varname} of shape {shape}'\n )\n self.samples[varname] = np.empty((self.keep_n,) + shape,\n dtype=self.var_dtypes[varname])\n if sampler_vars is None:\n return\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict()\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n <docstring token>\n <assignment token>\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n\n def setup(self, draws, chain, sampler_vars=None) ->None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws : int\n Expected number of draws\n chain : int\n Chain number\n sampler_vars : list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n _debug(f'Setting up ring buffer backend of size {self.keep_n}.')\n super(pm.backends.ndarray.NDArray, self).setup(draws, chain,\n sampler_vars)\n self.chain = chain\n _debug(f'I am chain {chain}.')\n if self.samples:\n _debug('Concatenating old samples.')\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n _debug(\n f'Initializing container for {varname} of shape {shape} which has old samples.'\n )\n new_var_samples = np.empty((max(0, self.keep_n - old_draws)\n ,) + shape, self.var_dtypes[varname])\n _debug(f'Concatenating old samples to {varname}.')\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples), axis=0)\n _debug(f'Finished concatenating old samples for {varname}.')\n else:\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n _debug(f'Initializing container for {varname} of shape {shape}'\n )\n self.samples[varname] = np.empty((self.keep_n,) + shape,\n dtype=self.var_dtypes[varname])\n if sampler_vars is None:\n return\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict()\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.empty(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.empty(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n <docstring token>\n <assignment token>\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n <function token>\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n\n def __len__(self):\n if not self.samples:\n return 0\n return min(self.draw_idx, self.keep_n)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n <docstring token>\n <assignment token>\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n <function token>\n\n def record(self, point, sampler_stats=None) ->None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point : dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx % self.keep_n] = value\n if self._stats is not None and sampler_stats is None:\n raise ValueError('Expected sampler_stats')\n if self._stats is None and sampler_stats is not None:\n raise ValueError('Unknown sampler_stats')\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx % self.keep_n] = val\n self.draw_idx += 1\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n <docstring token>\n <assignment token>\n\n def __init__(self, name=None, model=None, vars=None, keep_n=10,\n test_point=None):\n super().__init__(name, model, vars, test_point)\n self.keep_n = keep_n\n <function token>\n <function token>\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n elif self.draw_idx < self.keep_n:\n self.samples = {var: vtrace[:self.draw_idx] for var, vtrace in\n self.samples.items()}\n else:\n self.samples = {var: np.concatenate([vtrace[self.draw_idx %\n self.keep_n:], vtrace[:self.draw_idx % self.keep_n]], axis=\n 0) for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [{var: trace[:self.draw_idx] for var, trace in\n stats.items()} for stats in self._stats]\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass RingBuff(pm.backends.ndarray.NDArray):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n<function token>\n<function token>\n"
] | false |
99,678 |
05695cf6202daef89d4049116a51475f225998d4
|
def group_equal(els):
if len(els) == 0:
return []
answer_list = [[els[0]]]
for i in range(1, len(els)):
el = els[i]
if el == answer_list[len(answer_list) - 1][0]:
answer_list[len(answer_list) - 1].append(el)
else:
answer_list.append([el])
return answer_list
print(group_equal([1, 1, 4, 4, 4, "hello", "hello", 4]) == [[1, 1], [4, 4, 4], ["hello", "hello"], [4]])
print(group_equal([1, 2, 3, 4]) == [[1], [2], [3], [4]])
print(group_equal([1]) == [[1]])
print(group_equal([]) == [])
|
[
"def group_equal(els):\n if len(els) == 0:\n return []\n answer_list = [[els[0]]]\n for i in range(1, len(els)):\n el = els[i]\n if el == answer_list[len(answer_list) - 1][0]:\n answer_list[len(answer_list) - 1].append(el)\n else:\n answer_list.append([el])\n return answer_list\n\n\nprint(group_equal([1, 1, 4, 4, 4, \"hello\", \"hello\", 4]) == [[1, 1], [4, 4, 4], [\"hello\", \"hello\"], [4]])\nprint(group_equal([1, 2, 3, 4]) == [[1], [2], [3], [4]])\nprint(group_equal([1]) == [[1]])\nprint(group_equal([]) == [])\n",
"def group_equal(els):\n if len(els) == 0:\n return []\n answer_list = [[els[0]]]\n for i in range(1, len(els)):\n el = els[i]\n if el == answer_list[len(answer_list) - 1][0]:\n answer_list[len(answer_list) - 1].append(el)\n else:\n answer_list.append([el])\n return answer_list\n\n\nprint(group_equal([1, 1, 4, 4, 4, 'hello', 'hello', 4]) == [[1, 1], [4, 4, \n 4], ['hello', 'hello'], [4]])\nprint(group_equal([1, 2, 3, 4]) == [[1], [2], [3], [4]])\nprint(group_equal([1]) == [[1]])\nprint(group_equal([]) == [])\n",
"def group_equal(els):\n if len(els) == 0:\n return []\n answer_list = [[els[0]]]\n for i in range(1, len(els)):\n el = els[i]\n if el == answer_list[len(answer_list) - 1][0]:\n answer_list[len(answer_list) - 1].append(el)\n else:\n answer_list.append([el])\n return answer_list\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
99,679 |
4d380c5e8994a98c5027980d7fd4db0bb42765ea
|
from django.shortcuts import render
from .forms import ContractorForm
from django.http import HttpResponse
from .models import Contractor
def Contractor_List(request) :
return render(request, 'contractor/contractor_index.html', {'contractors' : Contractor.objects.all()})
def Contractor_Details(request, con_id) :
context = {'contractor' : Contractor.objects.get(id=con_id)}
return render(request, 'contractor/details.html', context)
def Contractor_Apply(request) :
if request.method == 'POST': # data sent by user
form = ContractorForm(request.POST)
if form.is_valid():
form.save() # this will save details to database
return HttpResponse('Contractor estimation added to database')
else: # display empty form
form = ContractorForm()
return render(request, 'contractor/estimate.html', {'contractor_estimate_form' : form})
|
[
"from django.shortcuts import render\nfrom .forms import ContractorForm\nfrom django.http import HttpResponse\nfrom .models import Contractor\n\ndef Contractor_List(request) :\n return render(request, 'contractor/contractor_index.html', {'contractors' : Contractor.objects.all()})\n\ndef Contractor_Details(request, con_id) :\n context = {'contractor' : Contractor.objects.get(id=con_id)}\n return render(request, 'contractor/details.html', context)\n\ndef Contractor_Apply(request) :\n if request.method == 'POST': # data sent by user\n form = ContractorForm(request.POST)\n if form.is_valid():\n form.save() # this will save details to database\n return HttpResponse('Contractor estimation added to database')\n else: # display empty form\n form = ContractorForm()\n return render(request, 'contractor/estimate.html', {'contractor_estimate_form' : form})",
"from django.shortcuts import render\nfrom .forms import ContractorForm\nfrom django.http import HttpResponse\nfrom .models import Contractor\n\n\ndef Contractor_List(request):\n return render(request, 'contractor/contractor_index.html', {\n 'contractors': Contractor.objects.all()})\n\n\ndef Contractor_Details(request, con_id):\n context = {'contractor': Contractor.objects.get(id=con_id)}\n return render(request, 'contractor/details.html', context)\n\n\ndef Contractor_Apply(request):\n if request.method == 'POST':\n form = ContractorForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponse('Contractor estimation added to database')\n else:\n form = ContractorForm()\n return render(request, 'contractor/estimate.html', {\n 'contractor_estimate_form': form})\n",
"<import token>\n\n\ndef Contractor_List(request):\n return render(request, 'contractor/contractor_index.html', {\n 'contractors': Contractor.objects.all()})\n\n\ndef Contractor_Details(request, con_id):\n context = {'contractor': Contractor.objects.get(id=con_id)}\n return render(request, 'contractor/details.html', context)\n\n\ndef Contractor_Apply(request):\n if request.method == 'POST':\n form = ContractorForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponse('Contractor estimation added to database')\n else:\n form = ContractorForm()\n return render(request, 'contractor/estimate.html', {\n 'contractor_estimate_form': form})\n",
"<import token>\n\n\ndef Contractor_List(request):\n return render(request, 'contractor/contractor_index.html', {\n 'contractors': Contractor.objects.all()})\n\n\ndef Contractor_Details(request, con_id):\n context = {'contractor': Contractor.objects.get(id=con_id)}\n return render(request, 'contractor/details.html', context)\n\n\n<function token>\n",
"<import token>\n\n\ndef Contractor_List(request):\n return render(request, 'contractor/contractor_index.html', {\n 'contractors': Contractor.objects.all()})\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,680 |
10f9b50b75d14d2ba76dbef89f91e5a2cff2e756
|
from math import sqrt
class Matrix:
@staticmethod
def check_number_of_arguments(number_of_arguments):
if not sqrt(number_of_arguments).is_integer():
raise Error ("Wrong size of array")
def __init__(self, *list_passsed):
self.number_of_arguments = len(list_passsed)
Matrix.check_number_of_arguments(self.number_of_arguments)
self.dimention = int(sqrt(self.number_of_arguments))
self.elements = [ [] for i in range(self.dimention)]
for i in range(self.dimention):
self.elements[i] = [j for j in list_passsed[i * self.dimention : (i+1) * self.dimention]]
def add(self, matrix_to_add):
temp = []
for i in range(self.dimention):
for j in range(self.dimention):
temp.append(self.elements[i][j] + matrix_to_add.elements[i][j])
return Matrix(*temp)
def main():
matrix_1 = Matrix(4,5,6,7)
print(matrix_1.elements)
matrix_2 = Matrix(2,2,2,1)
print(matrix_2.elements)
matrix_3 = matrix_2.add(matrix_1)
print(matrix_3.elements)
if __name__ == "__main__":
main()
|
[
"from math import sqrt\n\nclass Matrix:\n\n @staticmethod\n def check_number_of_arguments(number_of_arguments):\n if not sqrt(number_of_arguments).is_integer():\n\t raise Error (\"Wrong size of array\")\n\n def __init__(self, *list_passsed):\n self.number_of_arguments = len(list_passsed)\n Matrix.check_number_of_arguments(self.number_of_arguments)\n self.dimention = int(sqrt(self.number_of_arguments))\n self.elements = [ [] for i in range(self.dimention)] \n for i in range(self.dimention):\n self.elements[i] = [j for j in list_passsed[i * self.dimention : (i+1) * self.dimention]]\n\n def add(self, matrix_to_add):\n temp = []\n for i in range(self.dimention):\n for j in range(self.dimention):\n temp.append(self.elements[i][j] + matrix_to_add.elements[i][j])\n return Matrix(*temp)\n\n \ndef main():\n matrix_1 = Matrix(4,5,6,7)\n print(matrix_1.elements)\n matrix_2 = Matrix(2,2,2,1)\n print(matrix_2.elements)\n matrix_3 = matrix_2.add(matrix_1)\n print(matrix_3.elements)\n\nif __name__ == \"__main__\":\n main()",
"from math import sqrt\n\n\nclass Matrix:\n\n @staticmethod\n def check_number_of_arguments(number_of_arguments):\n if not sqrt(number_of_arguments).is_integer():\n raise Error('Wrong size of array')\n\n def __init__(self, *list_passsed):\n self.number_of_arguments = len(list_passsed)\n Matrix.check_number_of_arguments(self.number_of_arguments)\n self.dimention = int(sqrt(self.number_of_arguments))\n self.elements = [[] for i in range(self.dimention)]\n for i in range(self.dimention):\n self.elements[i] = [j for j in list_passsed[i * self.dimention:\n (i + 1) * self.dimention]]\n\n def add(self, matrix_to_add):\n temp = []\n for i in range(self.dimention):\n for j in range(self.dimention):\n temp.append(self.elements[i][j] + matrix_to_add.elements[i][j])\n return Matrix(*temp)\n\n\ndef main():\n matrix_1 = Matrix(4, 5, 6, 7)\n print(matrix_1.elements)\n matrix_2 = Matrix(2, 2, 2, 1)\n print(matrix_2.elements)\n matrix_3 = matrix_2.add(matrix_1)\n print(matrix_3.elements)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass Matrix:\n\n @staticmethod\n def check_number_of_arguments(number_of_arguments):\n if not sqrt(number_of_arguments).is_integer():\n raise Error('Wrong size of array')\n\n def __init__(self, *list_passsed):\n self.number_of_arguments = len(list_passsed)\n Matrix.check_number_of_arguments(self.number_of_arguments)\n self.dimention = int(sqrt(self.number_of_arguments))\n self.elements = [[] for i in range(self.dimention)]\n for i in range(self.dimention):\n self.elements[i] = [j for j in list_passsed[i * self.dimention:\n (i + 1) * self.dimention]]\n\n def add(self, matrix_to_add):\n temp = []\n for i in range(self.dimention):\n for j in range(self.dimention):\n temp.append(self.elements[i][j] + matrix_to_add.elements[i][j])\n return Matrix(*temp)\n\n\ndef main():\n matrix_1 = Matrix(4, 5, 6, 7)\n print(matrix_1.elements)\n matrix_2 = Matrix(2, 2, 2, 1)\n print(matrix_2.elements)\n matrix_3 = matrix_2.add(matrix_1)\n print(matrix_3.elements)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass Matrix:\n\n @staticmethod\n def check_number_of_arguments(number_of_arguments):\n if not sqrt(number_of_arguments).is_integer():\n raise Error('Wrong size of array')\n\n def __init__(self, *list_passsed):\n self.number_of_arguments = len(list_passsed)\n Matrix.check_number_of_arguments(self.number_of_arguments)\n self.dimention = int(sqrt(self.number_of_arguments))\n self.elements = [[] for i in range(self.dimention)]\n for i in range(self.dimention):\n self.elements[i] = [j for j in list_passsed[i * self.dimention:\n (i + 1) * self.dimention]]\n\n def add(self, matrix_to_add):\n temp = []\n for i in range(self.dimention):\n for j in range(self.dimention):\n temp.append(self.elements[i][j] + matrix_to_add.elements[i][j])\n return Matrix(*temp)\n\n\ndef main():\n matrix_1 = Matrix(4, 5, 6, 7)\n print(matrix_1.elements)\n matrix_2 = Matrix(2, 2, 2, 1)\n print(matrix_2.elements)\n matrix_3 = matrix_2.add(matrix_1)\n print(matrix_3.elements)\n\n\n<code token>\n",
"<import token>\n\n\nclass Matrix:\n\n @staticmethod\n def check_number_of_arguments(number_of_arguments):\n if not sqrt(number_of_arguments).is_integer():\n raise Error('Wrong size of array')\n\n def __init__(self, *list_passsed):\n self.number_of_arguments = len(list_passsed)\n Matrix.check_number_of_arguments(self.number_of_arguments)\n self.dimention = int(sqrt(self.number_of_arguments))\n self.elements = [[] for i in range(self.dimention)]\n for i in range(self.dimention):\n self.elements[i] = [j for j in list_passsed[i * self.dimention:\n (i + 1) * self.dimention]]\n\n def add(self, matrix_to_add):\n temp = []\n for i in range(self.dimention):\n for j in range(self.dimention):\n temp.append(self.elements[i][j] + matrix_to_add.elements[i][j])\n return Matrix(*temp)\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Matrix:\n\n @staticmethod\n def check_number_of_arguments(number_of_arguments):\n if not sqrt(number_of_arguments).is_integer():\n raise Error('Wrong size of array')\n\n def __init__(self, *list_passsed):\n self.number_of_arguments = len(list_passsed)\n Matrix.check_number_of_arguments(self.number_of_arguments)\n self.dimention = int(sqrt(self.number_of_arguments))\n self.elements = [[] for i in range(self.dimention)]\n for i in range(self.dimention):\n self.elements[i] = [j for j in list_passsed[i * self.dimention:\n (i + 1) * self.dimention]]\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Matrix:\n\n @staticmethod\n def check_number_of_arguments(number_of_arguments):\n if not sqrt(number_of_arguments).is_integer():\n raise Error('Wrong size of array')\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Matrix:\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,681 |
4c6eb5ee990bfb1cd618182f8b3a7f685eedb149
|
from future.utils import with_metaclass
from ia32_python.utils.ia32_struct import *
from ia32_python.utils.ia32_enum import *
from ia32_python.utils.ia32_bit_field import *
__doc__ = """
@brief IA32_MTRR_FIX16K(x)
IA32_MTRR_FIX16K(x).
"""
IA32_MTRR_FIX16K_BASE = 0x80000
IA32_MTRR_FIX16K_SIZE = 0x4000
IA32_MTRR_FIX16K_80000 = 0x258
IA32_MTRR_FIX16K_A0000 = 0x259
|
[
"from future.utils import with_metaclass\nfrom ia32_python.utils.ia32_struct import *\nfrom ia32_python.utils.ia32_enum import *\nfrom ia32_python.utils.ia32_bit_field import *\n\n\n__doc__ = \"\"\"\n@brief IA32_MTRR_FIX16K(x)\n\nIA32_MTRR_FIX16K(x).\n\"\"\"\n\n\nIA32_MTRR_FIX16K_BASE = 0x80000\n\n\nIA32_MTRR_FIX16K_SIZE = 0x4000\n\n\nIA32_MTRR_FIX16K_80000 = 0x258\n\n\nIA32_MTRR_FIX16K_A0000 = 0x259\n\n\n",
"from future.utils import with_metaclass\nfrom ia32_python.utils.ia32_struct import *\nfrom ia32_python.utils.ia32_enum import *\nfrom ia32_python.utils.ia32_bit_field import *\n__doc__ = \"\"\"\n@brief IA32_MTRR_FIX16K(x)\n\nIA32_MTRR_FIX16K(x).\n\"\"\"\nIA32_MTRR_FIX16K_BASE = 524288\nIA32_MTRR_FIX16K_SIZE = 16384\nIA32_MTRR_FIX16K_80000 = 600\nIA32_MTRR_FIX16K_A0000 = 601\n",
"<import token>\n__doc__ = \"\"\"\n@brief IA32_MTRR_FIX16K(x)\n\nIA32_MTRR_FIX16K(x).\n\"\"\"\nIA32_MTRR_FIX16K_BASE = 524288\nIA32_MTRR_FIX16K_SIZE = 16384\nIA32_MTRR_FIX16K_80000 = 600\nIA32_MTRR_FIX16K_A0000 = 601\n",
"<import token>\n<assignment token>\n"
] | false |
99,682 |
d99bfdff9760260e30691c193dd42655fb8eb4f8
|
import subprocess
import unittest
class TestBrickset(unittest.TestCase):
def test_help(self):
# when
abandon_output = subprocess.check_output('../bin/brickset -h'.split())
# then
self.assertTrue(abandon_output.startswith('usage: brickset'))
|
[
"import subprocess\nimport unittest\n\n\nclass TestBrickset(unittest.TestCase):\n\n def test_help(self):\n # when\n abandon_output = subprocess.check_output('../bin/brickset -h'.split())\n\n # then\n self.assertTrue(abandon_output.startswith('usage: brickset'))\n",
"import subprocess\nimport unittest\n\n\nclass TestBrickset(unittest.TestCase):\n\n def test_help(self):\n abandon_output = subprocess.check_output('../bin/brickset -h'.split())\n self.assertTrue(abandon_output.startswith('usage: brickset'))\n",
"<import token>\n\n\nclass TestBrickset(unittest.TestCase):\n\n def test_help(self):\n abandon_output = subprocess.check_output('../bin/brickset -h'.split())\n self.assertTrue(abandon_output.startswith('usage: brickset'))\n",
"<import token>\n\n\nclass TestBrickset(unittest.TestCase):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,683 |
35ce0f551951eb9c31826249c4dda37da39d938c
|
while True:
l = [1, 2, 3, 4, 5, 7, 9, 10, 12]
p = int(input("Digite um número para pesquisar: "))
for c in l: #lembrar que o c representa o l em uma determinada posição
print(c)
if c == p:
print("Elemento encontrado!!!")
break
else: # é interessante notar que esse else é um else do for, se analisar tudo e nada acontercer, da o else
print("Elemento não encontrado")
|
[
"while True: \n l = [1, 2, 3, 4, 5, 7, 9, 10, 12]\n p = int(input(\"Digite um número para pesquisar: \"))\n for c in l: #lembrar que o c representa o l em uma determinada posição\n print(c)\n if c == p:\n print(\"Elemento encontrado!!!\")\n break\n else: # é interessante notar que esse else é um else do for, se analisar tudo e nada acontercer, da o else\n print(\"Elemento não encontrado\")",
"while True:\n l = [1, 2, 3, 4, 5, 7, 9, 10, 12]\n p = int(input('Digite um número para pesquisar: '))\n for c in l:\n print(c)\n if c == p:\n print('Elemento encontrado!!!')\n break\n else:\n print('Elemento não encontrado')\n",
"<code token>\n"
] | false |
99,684 |
e8db29a4a320ad44bee845672e87b8c167c0801a
|
import numpy as np
import pandas as pd
import math
import random
from pandas import DataFrame
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn.preprocessing import scale
# from TSP import Dynamic
# from TSP import Greedy
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
names = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('u.data', sep='\t', names=names)
df.head()
# unique function check to check the unique numbers of user id and item id
n_users = df.user_id.unique().shape[0]
n_items = df.item_id.unique().shape[0]
# print (str(n_users) + ' users')
# print (str(n_items) + ' items')
ratingsBinary = np.zeros((n_users, n_items))
ratings = np.zeros((n_users, n_items))
threshold = 3
# df.intertuples will run the code directly
for row in df.itertuples():
# mark ratings that over 3 and 4 to 1; the rest of them will be set to 0.
if row[3] > threshold:
ratingsBinary[row[1]-1, row[2]-1] = 1
ratings[row[1]-1, row[2]-1] = row[3]
# print(ratings)
# print(ratingsBinary)
# save binary contents to a csv file
df = DataFrame(ratingsBinary)
df.to_csv('binary_values.csv', index = False, header = False)
# randomly select 400 items from 0, 1000; replace = False (there is no back)
biclusters_num = 400
biclusters_rows = int(biclusters_num**0.5)
biclusters_cols = int(biclusters_num**0.5)
selection_index = np.random.choice(a = 1000, size= biclusters_num, replace = False)
# print(selection_index)
# read file from matlab which generates biclustering files
filename = "biclusters.csv"
cluster_no = 0
clusters_number = 1000
f = open(filename)
# create sub matrix to record biclustering
for i in range(0, clusters_number):
# obtain the index of rows and cols separately
rows = f.readline().split()
cols = f.readline().split()
# valid whether the bimax runned correctly or not
i = np.zeros((len(rows), len(cols)))
# put the ratings back to biclustering matrix to valid the performance
row_count = 0
for j in rows:
col_count = 0
for k in cols:
i[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]
col_count += 1
row_count += 1
col_count = 0
# print(i)
f.close()
# read file from matlab which generates biclustering files
filename = "biclusters.csv"
cluster_no = 0
clusters_number = 1000
f = open(filename)
# save the index of bicluster index
# data type = biclustername : [rows index][cols index] eg. 1:[[2,3,4][3,4,5]]
dict_clusters = {}
# save the detail ratings of the biclusters
# data type = arrayname : [detail ratings] eg. 1:[]
dict_clusters_ratings = {}
# create sub matrix to record biclustering
for i in range(0, clusters_number):
# obtain the index of rows and cols separately
dictname = str(i)
rows = f.readline().split()
cols = f.readline().split()
# put user and item index into the dictionary -- dict_clusters
dict_clusters[dictname] = [rows, cols]
f.close()
# print(dict_clusters['bicluster_999'])
# get the specific index of what selected
for i in selection_index:
dictname = str(i)
arrayname = str(i)
rows = dict_clusters[dictname][0]
cols = dict_clusters[dictname][1]
a = np.zeros((len(rows), len(cols)))
row_count = 0
for j in rows:
col_count = 0
for k in cols:
a[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]
col_count += 1
row_count += 1
col_count = 0
# put array into the dictionary dict_clusters_ratings
dict_clusters_ratings[arrayname] = a
# print(dict_clusters_ratings['array_1'])
# show the location of each compressed point
x = []
y = []
PCA_dict_clusters = dict_clusters_ratings.copy()
for i in PCA_dict_clusters.keys():
# print(i)
PCA_dict_clusters[i] = StandardScaler().fit_transform(PCA_dict_clusters[i])
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(PCA_dict_clusters[i])
PCA_dict_clusters[i] = np.mean(np.abs(principalComponents), axis=0)
# PCA_dict_clusters[i] = np.mean(np.power(principalComponents, 2), axis = 0)
x.append(PCA_dict_clusters[i][0])
y.append(PCA_dict_clusters[i][1])
x = np.array(x)
y = np.array(y)
plt.scatter(x,y, s = 30, alpha=0.3)
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.title('2 Component PCA by abs')
# plt.show()
# calculate the distance between A and B points;
# A : array, B : array
def eucliDist(A,B):
return math.sqrt(sum([(a - b)**2 for (a,b) in zip(A,B)]))
# distance table to recorder the distance between two different data points
dist_matrix = np.zeros((biclusters_num, biclusters_num))
for i in range(biclusters_num):
for j in range(biclusters_num):
# assign value to dist
dist_matrix[i,j] = eucliDist(PCA_dict_clusters[list(PCA_dict_clusters.keys())[i]]
, PCA_dict_clusters[list(PCA_dict_clusters.keys())[j]])
# print(PCA_dict_clusters.keys())
# dist_matrix
dist_matrix = dist_matrix.tolist()
# save weights for each length
path_length = []
# save the vertex that has been visited to prevent revisit again
path_vertexs = []
# real routes
path = []
def find_path(j, vertex_len):
path_vertexs.append(j)
row = dist_matrix[j]
# copy_row: delete the vertex that has been visited --> prevent to operate it in the original rows directly
copy_row = [value for value in row]
walked_vertex = []
# save the vertex that has been visited to walked vertex
for i in path_vertexs:
walked_vertex.append(copy_row[i])
# remove the vertex that has been visited in the copy_row
for vertex in walked_vertex:
copy_row.remove(vertex)
# find the shortest value that never accessed in the row
if len(path_vertexs) < vertex_len:
min_e = min(copy_row)
j = row.index(min_e)
path_length.append(min_e)
find_path(j, vertex_len)
else:
min_e = dist_matrix[j][0]
path_length.append(min_e)
path_vertexs.append(0)
return path_vertexs, path_length
def print_path(vertexs, lengths):
vertexs = [vertex + 1 for vertex in vertexs]
for i, vertex in enumerate(vertexs):
path.append(vertex)
if i == len(dist_matrix):
break
# ("the smallest total value is:", sum(lengths))
# print("path is:", path)
path_vertexs, path_length = find_path(0, len(dist_matrix))
print_path(path_vertexs, path_length)
# put the selected 400 biclusters into a new dict biclusters
# refactor name and index to make it easy to be found in the next stage
new_dict_biclusters = {}
k = 1
for i in selection_index:
new_dict_biclusters[k] = dict_clusters[str(i)]
k += 1
# print(new_dict_biclusters)
states = np.zeros((biclusters_rows, biclusters_cols))
# recorder the index of path array
k = 0
increment = range(1, int(biclusters_cols), 1)
decrement = range(int(biclusters_cols - 1), 0, -1)
states[0][0] = path[k]
for row in range(biclusters_rows):
if row % 2 == 0:
cols = increment
elif row % 2 == 1:
cols = decrement
for col in cols:
k += 1
states[row][col] = path[k]
for j in range(biclusters_rows-1, 0, -1):
k += 1
states[j][0] = path[k]
# print(states)
# new_states = np.zeros((biclusters_rows, biclusters_cols))
# for row in range(biclusters_rows):
# for col in range(biclusters_cols):
# new_states[row][col] = new_dict_biclusters[states[row][col]]
|
[
"import numpy as np\r\nimport pandas as pd\r\nimport math\r\nimport random\r\nfrom pandas import DataFrame\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom sklearn import datasets\r\nfrom sklearn.preprocessing import scale\r\n\r\n# from TSP import Dynamic\r\n# from TSP import Greedy\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\n\r\nnames = ['user_id', 'item_id', 'rating', 'timestamp']\r\ndf = pd.read_csv('u.data', sep='\\t', names=names)\r\ndf.head()\r\n\r\n# unique function check to check the unique numbers of user id and item id\r\nn_users = df.user_id.unique().shape[0]\r\nn_items = df.item_id.unique().shape[0]\r\n# print (str(n_users) + ' users')\r\n# print (str(n_items) + ' items')\r\n\r\nratingsBinary = np.zeros((n_users, n_items))\r\nratings = np.zeros((n_users, n_items))\r\n\r\nthreshold = 3\r\n# df.intertuples will run the code directly\r\nfor row in df.itertuples():\r\n # mark ratings that over 3 and 4 to 1; the rest of them will be set to 0.\r\n if row[3] > threshold:\r\n ratingsBinary[row[1]-1, row[2]-1] = 1\r\n ratings[row[1]-1, row[2]-1] = row[3]\r\n# print(ratings)\r\n# print(ratingsBinary)\r\n\r\n# save binary contents to a csv file\r\ndf = DataFrame(ratingsBinary)\r\ndf.to_csv('binary_values.csv', index = False, header = False)\r\n\r\n# randomly select 400 items from 0, 1000; replace = False (there is no back)\r\nbiclusters_num = 400\r\nbiclusters_rows = int(biclusters_num**0.5)\r\nbiclusters_cols = int(biclusters_num**0.5)\r\nselection_index = np.random.choice(a = 1000, size= biclusters_num, replace = False)\r\n# print(selection_index)\r\n\r\n# read file from matlab which generates biclustering files\r\nfilename = \"biclusters.csv\"\r\ncluster_no = 0\r\n\r\nclusters_number = 1000\r\n\r\nf = open(filename)\r\n\r\n# create sub matrix to record biclustering\r\nfor i in range(0, clusters_number):\r\n\r\n # obtain the index of rows and cols separately\r\n rows = f.readline().split()\r\n cols = f.readline().split()\r\n\r\n # valid whether the bimax runned correctly or not\r\n i = np.zeros((len(rows), len(cols)))\r\n\r\n # put the ratings back to biclustering matrix to valid the performance\r\n row_count = 0\r\n for j in rows:\r\n col_count = 0\r\n\r\n for k in cols:\r\n i[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\r\n col_count += 1\r\n row_count += 1\r\n col_count = 0\r\n # print(i)\r\nf.close()\r\n\r\n# read file from matlab which generates biclustering files\r\nfilename = \"biclusters.csv\"\r\ncluster_no = 0\r\n\r\nclusters_number = 1000\r\n\r\nf = open(filename)\r\n\r\n# save the index of bicluster index\r\n# data type = biclustername : [rows index][cols index] eg. 1:[[2,3,4][3,4,5]]\r\ndict_clusters = {}\r\n# save the detail ratings of the biclusters\r\n# data type = arrayname : [detail ratings] eg. 1:[]\r\ndict_clusters_ratings = {}\r\n\r\n# create sub matrix to record biclustering\r\nfor i in range(0, clusters_number):\r\n # obtain the index of rows and cols separately\r\n dictname = str(i)\r\n\r\n rows = f.readline().split()\r\n cols = f.readline().split()\r\n\r\n # put user and item index into the dictionary -- dict_clusters\r\n dict_clusters[dictname] = [rows, cols]\r\nf.close()\r\n# print(dict_clusters['bicluster_999'])\r\n\r\n# get the specific index of what selected\r\nfor i in selection_index:\r\n dictname = str(i)\r\n arrayname = str(i)\r\n\r\n rows = dict_clusters[dictname][0]\r\n cols = dict_clusters[dictname][1]\r\n\r\n a = np.zeros((len(rows), len(cols)))\r\n\r\n row_count = 0\r\n for j in rows:\r\n col_count = 0\r\n\r\n for k in cols:\r\n a[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\r\n col_count += 1\r\n row_count += 1\r\n col_count = 0\r\n # put array into the dictionary dict_clusters_ratings\r\n dict_clusters_ratings[arrayname] = a\r\n\r\n# print(dict_clusters_ratings['array_1'])\r\n\r\n# show the location of each compressed point\r\nx = []\r\ny = []\r\n\r\nPCA_dict_clusters = dict_clusters_ratings.copy()\r\n\r\nfor i in PCA_dict_clusters.keys():\r\n # print(i)\r\n\r\n PCA_dict_clusters[i] = StandardScaler().fit_transform(PCA_dict_clusters[i])\r\n pca = PCA(n_components=2)\r\n principalComponents = pca.fit_transform(PCA_dict_clusters[i])\r\n\r\n PCA_dict_clusters[i] = np.mean(np.abs(principalComponents), axis=0)\r\n # PCA_dict_clusters[i] = np.mean(np.power(principalComponents, 2), axis = 0)\r\n\r\n x.append(PCA_dict_clusters[i][0])\r\n y.append(PCA_dict_clusters[i][1])\r\n\r\nx = np.array(x)\r\ny = np.array(y)\r\n\r\nplt.scatter(x,y, s = 30, alpha=0.3)\r\nplt.xlabel('Principal Component 1')\r\nplt.ylabel('Principal Component 2')\r\nplt.title('2 Component PCA by abs')\r\n\r\n# plt.show()\r\n\r\n# calculate the distance between A and B points;\r\n# A : array, B : array\r\ndef eucliDist(A,B):\r\n return math.sqrt(sum([(a - b)**2 for (a,b) in zip(A,B)]))\r\n\r\n# distance table to recorder the distance between two different data points\r\ndist_matrix = np.zeros((biclusters_num, biclusters_num))\r\nfor i in range(biclusters_num):\r\n for j in range(biclusters_num):\r\n # assign value to dist\r\n dist_matrix[i,j] = eucliDist(PCA_dict_clusters[list(PCA_dict_clusters.keys())[i]]\r\n , PCA_dict_clusters[list(PCA_dict_clusters.keys())[j]])\r\n# print(PCA_dict_clusters.keys())\r\n\r\n# dist_matrix\r\ndist_matrix = dist_matrix.tolist()\r\n\r\n# save weights for each length\r\npath_length = []\r\n# save the vertex that has been visited to prevent revisit again\r\npath_vertexs = []\r\n# real routes\r\npath = []\r\n\r\n\r\ndef find_path(j, vertex_len):\r\n path_vertexs.append(j)\r\n row = dist_matrix[j]\r\n\r\n # copy_row: delete the vertex that has been visited --> prevent to operate it in the original rows directly\r\n copy_row = [value for value in row]\r\n\r\n walked_vertex = []\r\n\r\n # save the vertex that has been visited to walked vertex\r\n for i in path_vertexs:\r\n walked_vertex.append(copy_row[i])\r\n\r\n # remove the vertex that has been visited in the copy_row\r\n for vertex in walked_vertex:\r\n copy_row.remove(vertex)\r\n\r\n # find the shortest value that never accessed in the row\r\n if len(path_vertexs) < vertex_len:\r\n min_e = min(copy_row)\r\n j = row.index(min_e)\r\n path_length.append(min_e)\r\n find_path(j, vertex_len)\r\n else:\r\n min_e = dist_matrix[j][0]\r\n path_length.append(min_e)\r\n path_vertexs.append(0)\r\n return path_vertexs, path_length\r\n\r\n\r\ndef print_path(vertexs, lengths):\r\n vertexs = [vertex + 1 for vertex in vertexs]\r\n for i, vertex in enumerate(vertexs):\r\n path.append(vertex)\r\n\r\n if i == len(dist_matrix):\r\n break\r\n\r\n # (\"the smallest total value is:\", sum(lengths))\r\n # print(\"path is:\", path)\r\n\r\n\r\npath_vertexs, path_length = find_path(0, len(dist_matrix))\r\nprint_path(path_vertexs, path_length)\r\n\r\n# put the selected 400 biclusters into a new dict biclusters\r\n# refactor name and index to make it easy to be found in the next stage\r\nnew_dict_biclusters = {}\r\n\r\nk = 1\r\nfor i in selection_index:\r\n new_dict_biclusters[k] = dict_clusters[str(i)]\r\n k += 1\r\n# print(new_dict_biclusters)\r\n\r\nstates = np.zeros((biclusters_rows, biclusters_cols))\r\n\r\n# recorder the index of path array\r\nk = 0\r\n\r\nincrement = range(1, int(biclusters_cols), 1)\r\ndecrement = range(int(biclusters_cols - 1), 0, -1)\r\n\r\nstates[0][0] = path[k]\r\n\r\nfor row in range(biclusters_rows):\r\n if row % 2 == 0:\r\n cols = increment\r\n elif row % 2 == 1:\r\n cols = decrement\r\n\r\n for col in cols:\r\n k += 1\r\n states[row][col] = path[k]\r\n\r\nfor j in range(biclusters_rows-1, 0, -1):\r\n k += 1\r\n states[j][0] = path[k]\r\n\r\n# print(states)\r\n# new_states = np.zeros((biclusters_rows, biclusters_cols))\r\n# for row in range(biclusters_rows):\r\n# for col in range(biclusters_cols):\r\n# new_states[row][col] = new_dict_biclusters[states[row][col]]\r\n\r\n\r\n",
"import numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom pandas import DataFrame\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.preprocessing import scale\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nnames = ['user_id', 'item_id', 'rating', 'timestamp']\ndf = pd.read_csv('u.data', sep='\\t', names=names)\ndf.head()\nn_users = df.user_id.unique().shape[0]\nn_items = df.item_id.unique().shape[0]\nratingsBinary = np.zeros((n_users, n_items))\nratings = np.zeros((n_users, n_items))\nthreshold = 3\nfor row in df.itertuples():\n if row[3] > threshold:\n ratingsBinary[row[1] - 1, row[2] - 1] = 1\n ratings[row[1] - 1, row[2] - 1] = row[3]\ndf = DataFrame(ratingsBinary)\ndf.to_csv('binary_values.csv', index=False, header=False)\nbiclusters_num = 400\nbiclusters_rows = int(biclusters_num ** 0.5)\nbiclusters_cols = int(biclusters_num ** 0.5)\nselection_index = np.random.choice(a=1000, size=biclusters_num, replace=False)\nfilename = 'biclusters.csv'\ncluster_no = 0\nclusters_number = 1000\nf = open(filename)\nfor i in range(0, clusters_number):\n rows = f.readline().split()\n cols = f.readline().split()\n i = np.zeros((len(rows), len(cols)))\n row_count = 0\n for j in rows:\n col_count = 0\n for k in cols:\n i[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\n col_count += 1\n row_count += 1\n col_count = 0\nf.close()\nfilename = 'biclusters.csv'\ncluster_no = 0\nclusters_number = 1000\nf = open(filename)\ndict_clusters = {}\ndict_clusters_ratings = {}\nfor i in range(0, clusters_number):\n dictname = str(i)\n rows = f.readline().split()\n cols = f.readline().split()\n dict_clusters[dictname] = [rows, cols]\nf.close()\nfor i in selection_index:\n dictname = str(i)\n arrayname = str(i)\n rows = dict_clusters[dictname][0]\n cols = dict_clusters[dictname][1]\n a = np.zeros((len(rows), len(cols)))\n row_count = 0\n for j in rows:\n col_count = 0\n for k in cols:\n a[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\n col_count += 1\n row_count += 1\n col_count = 0\n dict_clusters_ratings[arrayname] = a\nx = []\ny = []\nPCA_dict_clusters = dict_clusters_ratings.copy()\nfor i in PCA_dict_clusters.keys():\n PCA_dict_clusters[i] = StandardScaler().fit_transform(PCA_dict_clusters[i])\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(PCA_dict_clusters[i])\n PCA_dict_clusters[i] = np.mean(np.abs(principalComponents), axis=0)\n x.append(PCA_dict_clusters[i][0])\n y.append(PCA_dict_clusters[i][1])\nx = np.array(x)\ny = np.array(y)\nplt.scatter(x, y, s=30, alpha=0.3)\nplt.xlabel('Principal Component 1')\nplt.ylabel('Principal Component 2')\nplt.title('2 Component PCA by abs')\n\n\ndef eucliDist(A, B):\n return math.sqrt(sum([((a - b) ** 2) for a, b in zip(A, B)]))\n\n\ndist_matrix = np.zeros((biclusters_num, biclusters_num))\nfor i in range(biclusters_num):\n for j in range(biclusters_num):\n dist_matrix[i, j] = eucliDist(PCA_dict_clusters[list(\n PCA_dict_clusters.keys())[i]], PCA_dict_clusters[list(\n PCA_dict_clusters.keys())[j]])\ndist_matrix = dist_matrix.tolist()\npath_length = []\npath_vertexs = []\npath = []\n\n\ndef find_path(j, vertex_len):\n path_vertexs.append(j)\n row = dist_matrix[j]\n copy_row = [value for value in row]\n walked_vertex = []\n for i in path_vertexs:\n walked_vertex.append(copy_row[i])\n for vertex in walked_vertex:\n copy_row.remove(vertex)\n if len(path_vertexs) < vertex_len:\n min_e = min(copy_row)\n j = row.index(min_e)\n path_length.append(min_e)\n find_path(j, vertex_len)\n else:\n min_e = dist_matrix[j][0]\n path_length.append(min_e)\n path_vertexs.append(0)\n return path_vertexs, path_length\n\n\ndef print_path(vertexs, lengths):\n vertexs = [(vertex + 1) for vertex in vertexs]\n for i, vertex in enumerate(vertexs):\n path.append(vertex)\n if i == len(dist_matrix):\n break\n\n\npath_vertexs, path_length = find_path(0, len(dist_matrix))\nprint_path(path_vertexs, path_length)\nnew_dict_biclusters = {}\nk = 1\nfor i in selection_index:\n new_dict_biclusters[k] = dict_clusters[str(i)]\n k += 1\nstates = np.zeros((biclusters_rows, biclusters_cols))\nk = 0\nincrement = range(1, int(biclusters_cols), 1)\ndecrement = range(int(biclusters_cols - 1), 0, -1)\nstates[0][0] = path[k]\nfor row in range(biclusters_rows):\n if row % 2 == 0:\n cols = increment\n elif row % 2 == 1:\n cols = decrement\n for col in cols:\n k += 1\n states[row][col] = path[k]\nfor j in range(biclusters_rows - 1, 0, -1):\n k += 1\n states[j][0] = path[k]\n",
"<import token>\nnames = ['user_id', 'item_id', 'rating', 'timestamp']\ndf = pd.read_csv('u.data', sep='\\t', names=names)\ndf.head()\nn_users = df.user_id.unique().shape[0]\nn_items = df.item_id.unique().shape[0]\nratingsBinary = np.zeros((n_users, n_items))\nratings = np.zeros((n_users, n_items))\nthreshold = 3\nfor row in df.itertuples():\n if row[3] > threshold:\n ratingsBinary[row[1] - 1, row[2] - 1] = 1\n ratings[row[1] - 1, row[2] - 1] = row[3]\ndf = DataFrame(ratingsBinary)\ndf.to_csv('binary_values.csv', index=False, header=False)\nbiclusters_num = 400\nbiclusters_rows = int(biclusters_num ** 0.5)\nbiclusters_cols = int(biclusters_num ** 0.5)\nselection_index = np.random.choice(a=1000, size=biclusters_num, replace=False)\nfilename = 'biclusters.csv'\ncluster_no = 0\nclusters_number = 1000\nf = open(filename)\nfor i in range(0, clusters_number):\n rows = f.readline().split()\n cols = f.readline().split()\n i = np.zeros((len(rows), len(cols)))\n row_count = 0\n for j in rows:\n col_count = 0\n for k in cols:\n i[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\n col_count += 1\n row_count += 1\n col_count = 0\nf.close()\nfilename = 'biclusters.csv'\ncluster_no = 0\nclusters_number = 1000\nf = open(filename)\ndict_clusters = {}\ndict_clusters_ratings = {}\nfor i in range(0, clusters_number):\n dictname = str(i)\n rows = f.readline().split()\n cols = f.readline().split()\n dict_clusters[dictname] = [rows, cols]\nf.close()\nfor i in selection_index:\n dictname = str(i)\n arrayname = str(i)\n rows = dict_clusters[dictname][0]\n cols = dict_clusters[dictname][1]\n a = np.zeros((len(rows), len(cols)))\n row_count = 0\n for j in rows:\n col_count = 0\n for k in cols:\n a[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\n col_count += 1\n row_count += 1\n col_count = 0\n dict_clusters_ratings[arrayname] = a\nx = []\ny = []\nPCA_dict_clusters = dict_clusters_ratings.copy()\nfor i in PCA_dict_clusters.keys():\n PCA_dict_clusters[i] = StandardScaler().fit_transform(PCA_dict_clusters[i])\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(PCA_dict_clusters[i])\n PCA_dict_clusters[i] = np.mean(np.abs(principalComponents), axis=0)\n x.append(PCA_dict_clusters[i][0])\n y.append(PCA_dict_clusters[i][1])\nx = np.array(x)\ny = np.array(y)\nplt.scatter(x, y, s=30, alpha=0.3)\nplt.xlabel('Principal Component 1')\nplt.ylabel('Principal Component 2')\nplt.title('2 Component PCA by abs')\n\n\ndef eucliDist(A, B):\n return math.sqrt(sum([((a - b) ** 2) for a, b in zip(A, B)]))\n\n\ndist_matrix = np.zeros((biclusters_num, biclusters_num))\nfor i in range(biclusters_num):\n for j in range(biclusters_num):\n dist_matrix[i, j] = eucliDist(PCA_dict_clusters[list(\n PCA_dict_clusters.keys())[i]], PCA_dict_clusters[list(\n PCA_dict_clusters.keys())[j]])\ndist_matrix = dist_matrix.tolist()\npath_length = []\npath_vertexs = []\npath = []\n\n\ndef find_path(j, vertex_len):\n path_vertexs.append(j)\n row = dist_matrix[j]\n copy_row = [value for value in row]\n walked_vertex = []\n for i in path_vertexs:\n walked_vertex.append(copy_row[i])\n for vertex in walked_vertex:\n copy_row.remove(vertex)\n if len(path_vertexs) < vertex_len:\n min_e = min(copy_row)\n j = row.index(min_e)\n path_length.append(min_e)\n find_path(j, vertex_len)\n else:\n min_e = dist_matrix[j][0]\n path_length.append(min_e)\n path_vertexs.append(0)\n return path_vertexs, path_length\n\n\ndef print_path(vertexs, lengths):\n vertexs = [(vertex + 1) for vertex in vertexs]\n for i, vertex in enumerate(vertexs):\n path.append(vertex)\n if i == len(dist_matrix):\n break\n\n\npath_vertexs, path_length = find_path(0, len(dist_matrix))\nprint_path(path_vertexs, path_length)\nnew_dict_biclusters = {}\nk = 1\nfor i in selection_index:\n new_dict_biclusters[k] = dict_clusters[str(i)]\n k += 1\nstates = np.zeros((biclusters_rows, biclusters_cols))\nk = 0\nincrement = range(1, int(biclusters_cols), 1)\ndecrement = range(int(biclusters_cols - 1), 0, -1)\nstates[0][0] = path[k]\nfor row in range(biclusters_rows):\n if row % 2 == 0:\n cols = increment\n elif row % 2 == 1:\n cols = decrement\n for col in cols:\n k += 1\n states[row][col] = path[k]\nfor j in range(biclusters_rows - 1, 0, -1):\n k += 1\n states[j][0] = path[k]\n",
"<import token>\n<assignment token>\ndf.head()\n<assignment token>\nfor row in df.itertuples():\n if row[3] > threshold:\n ratingsBinary[row[1] - 1, row[2] - 1] = 1\n ratings[row[1] - 1, row[2] - 1] = row[3]\n<assignment token>\ndf.to_csv('binary_values.csv', index=False, header=False)\n<assignment token>\nfor i in range(0, clusters_number):\n rows = f.readline().split()\n cols = f.readline().split()\n i = np.zeros((len(rows), len(cols)))\n row_count = 0\n for j in rows:\n col_count = 0\n for k in cols:\n i[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\n col_count += 1\n row_count += 1\n col_count = 0\nf.close()\n<assignment token>\nfor i in range(0, clusters_number):\n dictname = str(i)\n rows = f.readline().split()\n cols = f.readline().split()\n dict_clusters[dictname] = [rows, cols]\nf.close()\nfor i in selection_index:\n dictname = str(i)\n arrayname = str(i)\n rows = dict_clusters[dictname][0]\n cols = dict_clusters[dictname][1]\n a = np.zeros((len(rows), len(cols)))\n row_count = 0\n for j in rows:\n col_count = 0\n for k in cols:\n a[row_count, col_count] = ratings[int(j) - 1, int(k) - 1]\n col_count += 1\n row_count += 1\n col_count = 0\n dict_clusters_ratings[arrayname] = a\n<assignment token>\nfor i in PCA_dict_clusters.keys():\n PCA_dict_clusters[i] = StandardScaler().fit_transform(PCA_dict_clusters[i])\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(PCA_dict_clusters[i])\n PCA_dict_clusters[i] = np.mean(np.abs(principalComponents), axis=0)\n x.append(PCA_dict_clusters[i][0])\n y.append(PCA_dict_clusters[i][1])\n<assignment token>\nplt.scatter(x, y, s=30, alpha=0.3)\nplt.xlabel('Principal Component 1')\nplt.ylabel('Principal Component 2')\nplt.title('2 Component PCA by abs')\n\n\ndef eucliDist(A, B):\n return math.sqrt(sum([((a - b) ** 2) for a, b in zip(A, B)]))\n\n\n<assignment token>\nfor i in range(biclusters_num):\n for j in range(biclusters_num):\n dist_matrix[i, j] = eucliDist(PCA_dict_clusters[list(\n PCA_dict_clusters.keys())[i]], PCA_dict_clusters[list(\n PCA_dict_clusters.keys())[j]])\n<assignment token>\n\n\ndef find_path(j, vertex_len):\n path_vertexs.append(j)\n row = dist_matrix[j]\n copy_row = [value for value in row]\n walked_vertex = []\n for i in path_vertexs:\n walked_vertex.append(copy_row[i])\n for vertex in walked_vertex:\n copy_row.remove(vertex)\n if len(path_vertexs) < vertex_len:\n min_e = min(copy_row)\n j = row.index(min_e)\n path_length.append(min_e)\n find_path(j, vertex_len)\n else:\n min_e = dist_matrix[j][0]\n path_length.append(min_e)\n path_vertexs.append(0)\n return path_vertexs, path_length\n\n\ndef print_path(vertexs, lengths):\n vertexs = [(vertex + 1) for vertex in vertexs]\n for i, vertex in enumerate(vertexs):\n path.append(vertex)\n if i == len(dist_matrix):\n break\n\n\n<assignment token>\nprint_path(path_vertexs, path_length)\n<assignment token>\nfor i in selection_index:\n new_dict_biclusters[k] = dict_clusters[str(i)]\n k += 1\n<assignment token>\nfor row in range(biclusters_rows):\n if row % 2 == 0:\n cols = increment\n elif row % 2 == 1:\n cols = decrement\n for col in cols:\n k += 1\n states[row][col] = path[k]\nfor j in range(biclusters_rows - 1, 0, -1):\n k += 1\n states[j][0] = path[k]\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef eucliDist(A, B):\n return math.sqrt(sum([((a - b) ** 2) for a, b in zip(A, B)]))\n\n\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef find_path(j, vertex_len):\n path_vertexs.append(j)\n row = dist_matrix[j]\n copy_row = [value for value in row]\n walked_vertex = []\n for i in path_vertexs:\n walked_vertex.append(copy_row[i])\n for vertex in walked_vertex:\n copy_row.remove(vertex)\n if len(path_vertexs) < vertex_len:\n min_e = min(copy_row)\n j = row.index(min_e)\n path_length.append(min_e)\n find_path(j, vertex_len)\n else:\n min_e = dist_matrix[j][0]\n path_length.append(min_e)\n path_vertexs.append(0)\n return path_vertexs, path_length\n\n\ndef print_path(vertexs, lengths):\n vertexs = [(vertex + 1) for vertex in vertexs]\n for i, vertex in enumerate(vertexs):\n path.append(vertex)\n if i == len(dist_matrix):\n break\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef eucliDist(A, B):\n return math.sqrt(sum([((a - b) ** 2) for a, b in zip(A, B)]))\n\n\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef print_path(vertexs, lengths):\n vertexs = [(vertex + 1) for vertex in vertexs]\n for i, vertex in enumerate(vertexs):\n path.append(vertex)\n if i == len(dist_matrix):\n break\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef print_path(vertexs, lengths):\n vertexs = [(vertex + 1) for vertex in vertexs]\n for i, vertex in enumerate(vertexs):\n path.append(vertex)\n if i == len(dist_matrix):\n break\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,685 |
06f5463f4cb609d3671b2a63a95b1ee421d7d7f5
|
from pizzapy import Customer, StoreLocator, Order, ConsoleInput
TAXRATE = .0825
def searchMenu(menu):
print("You are now searching the menu...")
item = input("Type an item to look for: ").strip().lower()
if len(item) > 1:
item = item[0].upper() + item[1:]
print(f"Results for: {item}\n")
menu.search(Name=item)
print()
else:
print("invalid, exiting search...")
def addToOrder(order):
print("Type the codes of the items you want to order...")
print("Press ENTER to stop ordering.")
while True:
item = input("Code: ").upper()
try:
order.add_item(item)
except:
if item == "":
break
print("Invalid Code...")
def show_stores(customer, k):
print("\nFinding Closest Stores...")
print("\n- CLOSEST STORES -")
k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer, k)
for i, store in enumerate(k_local_dominos):
print(str(i+1) + ".")
print(store)
print()
customer = Customer("Noah", "Foley", "[email protected]", "9039181598", "40 Bay Street, Toronto, ON, M5J2X2")
# customer = ConsoleInput.get_new_customer()
my_local_dominos = StoreLocator.find_closest_store_to_customer(customer)
print("\nClosest Store: ")
print(my_local_dominos)
ans = input("Would you like to order from this store (y/n)? ")
if ans.lower() not in ["yes", "y"]:
print("Goodbye!")
quit()
print("\nMENU\n")
menu = my_local_dominos.get_menu()
order = Order.begin_customer_order(customer, my_local_dominos, "ca")
while True:
searchMenu(menu)
addToOrder(order)
answer = input("Would you like to add more items (y/n)? ")
if answer.lower() not in ["yes", "y"]:
break
subtotal = 0
taxAmount = 0
total = 0
print("\nYour order is... ")
for item in order.data["Products"]:
price = item["Price"]
subtotal += float(item["Price"])
taxAmount = subtotal * TAXRATE
total = subtotal + taxAmount
# TODO: remove items from order using order.remove_item('ITEM CODE HERE')
print("\nYour total before tax is: $" + str(subtotal) + "\n")
print("\nYour tax amount is: $" + str(taxAmount) + "\n")
print("\nYour total after tax is: $" + str(total) + "\n")
payment = input("\nWill you being paying with cash or a credit card? (cash, credit card)")
if payment.lower() in ["card", "credit card"]:
card = ConsoleInput.get_credit_card()
else:
card = False
ans = input("Would you like to place this order (y/n)? ")
if ans.lower() in ["y", "yes"]:
order.place(card)
my_local_dominos.place_order(order, card)
print("Order Placed!")
else:
print("Goodbye!")
|
[
"from pizzapy import Customer, StoreLocator, Order, ConsoleInput\n\nTAXRATE = .0825\n\ndef searchMenu(menu):\n print(\"You are now searching the menu...\")\n item = input(\"Type an item to look for: \").strip().lower()\n\n if len(item) > 1:\n item = item[0].upper() + item[1:]\n print(f\"Results for: {item}\\n\")\n menu.search(Name=item)\n print()\n else:\n print(\"invalid, exiting search...\")\n\ndef addToOrder(order):\n print(\"Type the codes of the items you want to order...\")\n print(\"Press ENTER to stop ordering.\")\n while True:\n item = input(\"Code: \").upper()\n try:\n order.add_item(item)\n except:\n if item == \"\":\n break\n print(\"Invalid Code...\")\n\ndef show_stores(customer, k):\n print(\"\\nFinding Closest Stores...\")\n print(\"\\n- CLOSEST STORES -\")\n k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer, k)\n for i, store in enumerate(k_local_dominos):\n print(str(i+1) + \".\")\n print(store)\n print()\n\ncustomer = Customer(\"Noah\", \"Foley\", \"[email protected]\", \"9039181598\", \"40 Bay Street, Toronto, ON, M5J2X2\")\n# customer = ConsoleInput.get_new_customer()\nmy_local_dominos = StoreLocator.find_closest_store_to_customer(customer)\nprint(\"\\nClosest Store: \")\nprint(my_local_dominos)\n\nans = input(\"Would you like to order from this store (y/n)? \")\nif ans.lower() not in [\"yes\", \"y\"]:\n print(\"Goodbye!\")\n quit()\n\nprint(\"\\nMENU\\n\")\nmenu = my_local_dominos.get_menu()\norder = Order.begin_customer_order(customer, my_local_dominos, \"ca\")\n\nwhile True:\n searchMenu(menu)\n addToOrder(order)\n answer = input(\"Would you like to add more items (y/n)? \")\n if answer.lower() not in [\"yes\", \"y\"]:\n break\n\nsubtotal = 0\ntaxAmount = 0\ntotal = 0\nprint(\"\\nYour order is... \")\nfor item in order.data[\"Products\"]:\n price = item[\"Price\"]\n subtotal += float(item[\"Price\"])\n taxAmount = subtotal * TAXRATE\n total = subtotal + taxAmount\n # TODO: remove items from order using order.remove_item('ITEM CODE HERE')\n\nprint(\"\\nYour total before tax is: $\" + str(subtotal) + \"\\n\")\nprint(\"\\nYour tax amount is: $\" + str(taxAmount) + \"\\n\")\nprint(\"\\nYour total after tax is: $\" + str(total) + \"\\n\")\n\npayment = input(\"\\nWill you being paying with cash or a credit card? (cash, credit card)\")\nif payment.lower() in [\"card\", \"credit card\"]:\n card = ConsoleInput.get_credit_card()\nelse:\n card = False\n\nans = input(\"Would you like to place this order (y/n)? \")\nif ans.lower() in [\"y\", \"yes\"]:\n order.place(card)\n my_local_dominos.place_order(order, card)\n print(\"Order Placed!\")\nelse:\n print(\"Goodbye!\")\n",
"from pizzapy import Customer, StoreLocator, Order, ConsoleInput\nTAXRATE = 0.0825\n\n\ndef searchMenu(menu):\n print('You are now searching the menu...')\n item = input('Type an item to look for: ').strip().lower()\n if len(item) > 1:\n item = item[0].upper() + item[1:]\n print(f'Results for: {item}\\n')\n menu.search(Name=item)\n print()\n else:\n print('invalid, exiting search...')\n\n\ndef addToOrder(order):\n print('Type the codes of the items you want to order...')\n print('Press ENTER to stop ordering.')\n while True:\n item = input('Code: ').upper()\n try:\n order.add_item(item)\n except:\n if item == '':\n break\n print('Invalid Code...')\n\n\ndef show_stores(customer, k):\n print('\\nFinding Closest Stores...')\n print('\\n- CLOSEST STORES -')\n k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer,\n k)\n for i, store in enumerate(k_local_dominos):\n print(str(i + 1) + '.')\n print(store)\n print()\n\n\ncustomer = Customer('Noah', 'Foley', '[email protected]', '9039181598',\n '40 Bay Street, Toronto, ON, M5J2X2')\nmy_local_dominos = StoreLocator.find_closest_store_to_customer(customer)\nprint('\\nClosest Store: ')\nprint(my_local_dominos)\nans = input('Would you like to order from this store (y/n)? ')\nif ans.lower() not in ['yes', 'y']:\n print('Goodbye!')\n quit()\nprint('\\nMENU\\n')\nmenu = my_local_dominos.get_menu()\norder = Order.begin_customer_order(customer, my_local_dominos, 'ca')\nwhile True:\n searchMenu(menu)\n addToOrder(order)\n answer = input('Would you like to add more items (y/n)? ')\n if answer.lower() not in ['yes', 'y']:\n break\nsubtotal = 0\ntaxAmount = 0\ntotal = 0\nprint(\"\"\"\nYour order is... \"\"\")\nfor item in order.data['Products']:\n price = item['Price']\n subtotal += float(item['Price'])\n taxAmount = subtotal * TAXRATE\n total = subtotal + taxAmount\nprint(\"\"\"\nYour total before tax is: $\"\"\" + str(subtotal) + '\\n')\nprint(\"\"\"\nYour tax amount is: $\"\"\" + str(taxAmount) + '\\n')\nprint(\"\"\"\nYour total after tax is: $\"\"\" + str(total) + '\\n')\npayment = input(\n \"\"\"\nWill you being paying with cash or a credit card? (cash, credit card)\"\"\"\n )\nif payment.lower() in ['card', 'credit card']:\n card = ConsoleInput.get_credit_card()\nelse:\n card = False\nans = input('Would you like to place this order (y/n)? ')\nif ans.lower() in ['y', 'yes']:\n order.place(card)\n my_local_dominos.place_order(order, card)\n print('Order Placed!')\nelse:\n print('Goodbye!')\n",
"<import token>\nTAXRATE = 0.0825\n\n\ndef searchMenu(menu):\n print('You are now searching the menu...')\n item = input('Type an item to look for: ').strip().lower()\n if len(item) > 1:\n item = item[0].upper() + item[1:]\n print(f'Results for: {item}\\n')\n menu.search(Name=item)\n print()\n else:\n print('invalid, exiting search...')\n\n\ndef addToOrder(order):\n print('Type the codes of the items you want to order...')\n print('Press ENTER to stop ordering.')\n while True:\n item = input('Code: ').upper()\n try:\n order.add_item(item)\n except:\n if item == '':\n break\n print('Invalid Code...')\n\n\ndef show_stores(customer, k):\n print('\\nFinding Closest Stores...')\n print('\\n- CLOSEST STORES -')\n k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer,\n k)\n for i, store in enumerate(k_local_dominos):\n print(str(i + 1) + '.')\n print(store)\n print()\n\n\ncustomer = Customer('Noah', 'Foley', '[email protected]', '9039181598',\n '40 Bay Street, Toronto, ON, M5J2X2')\nmy_local_dominos = StoreLocator.find_closest_store_to_customer(customer)\nprint('\\nClosest Store: ')\nprint(my_local_dominos)\nans = input('Would you like to order from this store (y/n)? ')\nif ans.lower() not in ['yes', 'y']:\n print('Goodbye!')\n quit()\nprint('\\nMENU\\n')\nmenu = my_local_dominos.get_menu()\norder = Order.begin_customer_order(customer, my_local_dominos, 'ca')\nwhile True:\n searchMenu(menu)\n addToOrder(order)\n answer = input('Would you like to add more items (y/n)? ')\n if answer.lower() not in ['yes', 'y']:\n break\nsubtotal = 0\ntaxAmount = 0\ntotal = 0\nprint(\"\"\"\nYour order is... \"\"\")\nfor item in order.data['Products']:\n price = item['Price']\n subtotal += float(item['Price'])\n taxAmount = subtotal * TAXRATE\n total = subtotal + taxAmount\nprint(\"\"\"\nYour total before tax is: $\"\"\" + str(subtotal) + '\\n')\nprint(\"\"\"\nYour tax amount is: $\"\"\" + str(taxAmount) + '\\n')\nprint(\"\"\"\nYour total after tax is: $\"\"\" + str(total) + '\\n')\npayment = input(\n \"\"\"\nWill you being paying with cash or a credit card? (cash, credit card)\"\"\"\n )\nif payment.lower() in ['card', 'credit card']:\n card = ConsoleInput.get_credit_card()\nelse:\n card = False\nans = input('Would you like to place this order (y/n)? ')\nif ans.lower() in ['y', 'yes']:\n order.place(card)\n my_local_dominos.place_order(order, card)\n print('Order Placed!')\nelse:\n print('Goodbye!')\n",
"<import token>\n<assignment token>\n\n\ndef searchMenu(menu):\n print('You are now searching the menu...')\n item = input('Type an item to look for: ').strip().lower()\n if len(item) > 1:\n item = item[0].upper() + item[1:]\n print(f'Results for: {item}\\n')\n menu.search(Name=item)\n print()\n else:\n print('invalid, exiting search...')\n\n\ndef addToOrder(order):\n print('Type the codes of the items you want to order...')\n print('Press ENTER to stop ordering.')\n while True:\n item = input('Code: ').upper()\n try:\n order.add_item(item)\n except:\n if item == '':\n break\n print('Invalid Code...')\n\n\ndef show_stores(customer, k):\n print('\\nFinding Closest Stores...')\n print('\\n- CLOSEST STORES -')\n k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer,\n k)\n for i, store in enumerate(k_local_dominos):\n print(str(i + 1) + '.')\n print(store)\n print()\n\n\n<assignment token>\nprint('\\nClosest Store: ')\nprint(my_local_dominos)\n<assignment token>\nif ans.lower() not in ['yes', 'y']:\n print('Goodbye!')\n quit()\nprint('\\nMENU\\n')\n<assignment token>\nwhile True:\n searchMenu(menu)\n addToOrder(order)\n answer = input('Would you like to add more items (y/n)? ')\n if answer.lower() not in ['yes', 'y']:\n break\n<assignment token>\nprint(\"\"\"\nYour order is... \"\"\")\nfor item in order.data['Products']:\n price = item['Price']\n subtotal += float(item['Price'])\n taxAmount = subtotal * TAXRATE\n total = subtotal + taxAmount\nprint(\"\"\"\nYour total before tax is: $\"\"\" + str(subtotal) + '\\n')\nprint(\"\"\"\nYour tax amount is: $\"\"\" + str(taxAmount) + '\\n')\nprint(\"\"\"\nYour total after tax is: $\"\"\" + str(total) + '\\n')\n<assignment token>\nif payment.lower() in ['card', 'credit card']:\n card = ConsoleInput.get_credit_card()\nelse:\n card = False\n<assignment token>\nif ans.lower() in ['y', 'yes']:\n order.place(card)\n my_local_dominos.place_order(order, card)\n print('Order Placed!')\nelse:\n print('Goodbye!')\n",
"<import token>\n<assignment token>\n\n\ndef searchMenu(menu):\n print('You are now searching the menu...')\n item = input('Type an item to look for: ').strip().lower()\n if len(item) > 1:\n item = item[0].upper() + item[1:]\n print(f'Results for: {item}\\n')\n menu.search(Name=item)\n print()\n else:\n print('invalid, exiting search...')\n\n\ndef addToOrder(order):\n print('Type the codes of the items you want to order...')\n print('Press ENTER to stop ordering.')\n while True:\n item = input('Code: ').upper()\n try:\n order.add_item(item)\n except:\n if item == '':\n break\n print('Invalid Code...')\n\n\ndef show_stores(customer, k):\n print('\\nFinding Closest Stores...')\n print('\\n- CLOSEST STORES -')\n k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer,\n k)\n for i, store in enumerate(k_local_dominos):\n print(str(i + 1) + '.')\n print(store)\n print()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef addToOrder(order):\n print('Type the codes of the items you want to order...')\n print('Press ENTER to stop ordering.')\n while True:\n item = input('Code: ').upper()\n try:\n order.add_item(item)\n except:\n if item == '':\n break\n print('Invalid Code...')\n\n\ndef show_stores(customer, k):\n print('\\nFinding Closest Stores...')\n print('\\n- CLOSEST STORES -')\n k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer,\n k)\n for i, store in enumerate(k_local_dominos):\n print(str(i + 1) + '.')\n print(store)\n print()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef show_stores(customer, k):\n print('\\nFinding Closest Stores...')\n print('\\n- CLOSEST STORES -')\n k_local_dominos = StoreLocator.find_k_closest_stores_to_customer(customer,\n k)\n for i, store in enumerate(k_local_dominos):\n print(str(i + 1) + '.')\n print(store)\n print()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,686 |
138a1ccea56ede9568c920299d177599b5883f8f
|
"""Tests the confusion matrix functionality against known values"""
import unittest
from sparktkregtests.lib import sparktk_test
class ConfusionMatrix(sparktk_test.SparkTKTestCase):
def test_confusion_matrix(self):
"""Tests the confusion matrix functionality"""
perf = self.get_file("classification_metrics.csv")
schema = [("value", int), ("predicted", int)]
# [true_positive, false_negative, false_positive, true_negative]
actual_result = [64, 15, 23, 96]
frame = self.context.frame.import_csv(perf, schema=schema)
cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)
conf_matrix = cm.confusion_matrix.values
cumulative_matrix_list = [conf_matrix[0][0],
conf_matrix[0][1],
conf_matrix[1][0],
conf_matrix[1][1]]
self.assertEqual(actual_result, cumulative_matrix_list)
if __name__ == '__main__':
unittest.main()
|
[
"\"\"\"Tests the confusion matrix functionality against known values\"\"\"\nimport unittest\n\nfrom sparktkregtests.lib import sparktk_test\n\n\nclass ConfusionMatrix(sparktk_test.SparkTKTestCase):\n\n def test_confusion_matrix(self):\n \"\"\"Tests the confusion matrix functionality\"\"\"\n perf = self.get_file(\"classification_metrics.csv\")\n schema = [(\"value\", int), (\"predicted\", int)]\n # [true_positive, false_negative, false_positive, true_negative]\n actual_result = [64, 15, 23, 96]\n\n frame = self.context.frame.import_csv(perf, schema=schema)\n\n cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)\n\n conf_matrix = cm.confusion_matrix.values\n cumulative_matrix_list = [conf_matrix[0][0],\n conf_matrix[0][1],\n conf_matrix[1][0],\n conf_matrix[1][1]]\n self.assertEqual(actual_result, cumulative_matrix_list)\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\nimport unittest\nfrom sparktkregtests.lib import sparktk_test\n\n\nclass ConfusionMatrix(sparktk_test.SparkTKTestCase):\n\n def test_confusion_matrix(self):\n \"\"\"Tests the confusion matrix functionality\"\"\"\n perf = self.get_file('classification_metrics.csv')\n schema = [('value', int), ('predicted', int)]\n actual_result = [64, 15, 23, 96]\n frame = self.context.frame.import_csv(perf, schema=schema)\n cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)\n conf_matrix = cm.confusion_matrix.values\n cumulative_matrix_list = [conf_matrix[0][0], conf_matrix[0][1],\n conf_matrix[1][0], conf_matrix[1][1]]\n self.assertEqual(actual_result, cumulative_matrix_list)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass ConfusionMatrix(sparktk_test.SparkTKTestCase):\n\n def test_confusion_matrix(self):\n \"\"\"Tests the confusion matrix functionality\"\"\"\n perf = self.get_file('classification_metrics.csv')\n schema = [('value', int), ('predicted', int)]\n actual_result = [64, 15, 23, 96]\n frame = self.context.frame.import_csv(perf, schema=schema)\n cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)\n conf_matrix = cm.confusion_matrix.values\n cumulative_matrix_list = [conf_matrix[0][0], conf_matrix[0][1],\n conf_matrix[1][0], conf_matrix[1][1]]\n self.assertEqual(actual_result, cumulative_matrix_list)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass ConfusionMatrix(sparktk_test.SparkTKTestCase):\n\n def test_confusion_matrix(self):\n \"\"\"Tests the confusion matrix functionality\"\"\"\n perf = self.get_file('classification_metrics.csv')\n schema = [('value', int), ('predicted', int)]\n actual_result = [64, 15, 23, 96]\n frame = self.context.frame.import_csv(perf, schema=schema)\n cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)\n conf_matrix = cm.confusion_matrix.values\n cumulative_matrix_list = [conf_matrix[0][0], conf_matrix[0][1],\n conf_matrix[1][0], conf_matrix[1][1]]\n self.assertEqual(actual_result, cumulative_matrix_list)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ConfusionMatrix(sparktk_test.SparkTKTestCase):\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<code token>\n"
] | false |
99,687 |
0de2ad6e99e121f284ee25533f532872e11c1919
|
import numpy as np
#Gobal array which represents decision attribute values drawn for X_test data
X_test_decision=[]
'''
This function is used to predict the decision atrribute value by using X_test data.
Parameters used are:
X_test =Testing data which contains attribute values excluding decision attribute values
Y_test =Testing data which contains decision attribute values
attributes=It is a array of attribute values
remaining_attributes=It is a array which contains attributes other than decision attribute.
index_of_decision_attribute=Represents index of decision attribute in a dataset.
node=It is a dictionary which contains decision tree
'''
def predict(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,node):
#Stack is used to draw decisions from tree
stack = []
rules=[]
#rulesSet function gives set of rules drawn from decision tree.
def rulesSet(node, stack):
#creating rule list
rule=[]
#If node contains label,this means that this is the end of one decision rule.
if 'label' in node:
#Append label to node.
stack.append(node['label'])
#Performing transformation for calculations.
rule=np.array(stack)
rule.tolist()
rules.append(rule)
#Drawing decision atrribute value by using decision rule.
#drawDecision(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,rule)
stack.pop()
elif 'attribute' in node:
#Append attribute to stack
stack.append(node['attribute'])
#Iterating through attribute subnodes(attribute values)
for subnode in node['nodes']:
#Append subnode to stack
stack.append(subnode)
#Iterating through subnodes
rulesSet(node['nodes'][subnode], stack)
stack.pop()
stack.pop()
rulesSet(node,stack)
rules=np.array(rules)
decision=drawDecision(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,rules)
Accuracy=accuracy(Y_test,decision)
return Accuracy
'''
Drawing decision atrribute value by using decision rule
Parameters used:
X_test =Testing data which contains attribute values excluding decision attribute values
Y_test =Testing data which contains decision attribute values
attributes=It is a array of attribute values
remaining_attributes=It is a array which contains attributes other than decision attribute.
index_of_decision_attribute=Represents index of decision attribute in a dataset.
node=It is a dictionary which contains decision tree
'''
def drawDecision(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,rules):
#print()
k=0
rule_info={}
for rule in rules:
#Calculating number of attributes in a rule array
number_of_attributes=int((len(rule)-1)/2)
#Finiding attributes in a rule
attribute=[]
for i in range(number_of_attributes):
attribute.append(rule[i*2])
#Finiding poisition of decision in a rule array
decision_position=len(rule)-1
index=[]
#Gives index of attributes that are considered for decision in a decision rule
for i in range(len(attribute)):
count=0
for j in range(len(remaining_attributes)):
if remaining_attributes[j]==attribute[i]:
break
count+=1
index.append(count)
#Drawing attribute values in a rule
#In every rule array,odd index shows attribute values
values=[]
for i in range(len(index)):
values.append(rule[(i*2)+1])
#if j not in rule_info.keys():
rule_info[k] = {
'number_of_attributes':number_of_attributes,#list(),
'attribute':attribute,#list(),
'decision_position':decision_position,#list(),
'index':index,#list(),
'values':values#list()
}
#rule_info[k]['number_of_attributes'].append(number_of_attributes)
#rule_info[k]['attribute'].append(attribute)
#rule_info[k]['decision_position'].append(decision_position)
#rule_info[k]['index'].append(index)
#rule_info[k]['values'].append(values)
k+=1
#Represents decision for each X_test
decision={}
#Predicting values for test data
for i in range(len(X_test)):
#Checking for all rules
for j in range(len(rule_info)):
#Used for decision making
flag=0
#Represents attribute values which are required for matching decision rule
X_test_attribute_value=[]
#Stroing X_test attribute values
for k in rule_info[j]['index']:
X_test_attribute_value.append(X_test[i,k])
#Comparing values of X_test attributes and decision rule attribute values
z=0
for v in rule_info[j]['values']:
if (X_test_attribute_value[z]==v):
z+=1
flag=0
else:
#If one attibute value is new and not matching,considering which are matching.
if(z>=(len(rule_info[j]['values'])-1)):
flag=0
break
else:
flag=1
break
if(flag==0):
#Considering rule for which Attribute values are matched
rule=rules[j]
decision_position=rule_info[j]['decision_position']
#Storing results
decision[i] = {
'decision':rule[decision_position]
}
return decision
'''
Calculating accuracy
Parameters used:
Y_test=Testing data which contains decision attribute values
X_test_decision=Gobal array which represents decision attribute values drawn for X_test data
'''
def accuracy(Y_test,decision):
correct = 0
#print("Y_test"+repr(len(Y_test))+"Decision"+repr(len(decision)))
for i in range(len(Y_test)):
if Y_test[i] == decision[i]['decision']:
#Finding correctness
correct += 1
return (correct/float(len(Y_test))) * 100.0
|
[
"import numpy as np\n#Gobal array which represents decision attribute values drawn for X_test data \nX_test_decision=[]\n\n'''\nThis function is used to predict the decision atrribute value by using X_test data. \nParameters used are:\n X_test =Testing data which contains attribute values excluding decision attribute values\n Y_test =Testing data which contains decision attribute values\n attributes=It is a array of attribute values\n remaining_attributes=It is a array which contains attributes other than decision attribute.\n index_of_decision_attribute=Represents index of decision attribute in a dataset.\n node=It is a dictionary which contains decision tree\n'''\ndef predict(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,node):\n #Stack is used to draw decisions from tree\n stack = []\n rules=[]\n #rulesSet function gives set of rules drawn from decision tree.\n def rulesSet(node, stack):\n #creating rule list\n rule=[]\n\n #If node contains label,this means that this is the end of one decision rule.\n if 'label' in node:\n #Append label to node.\n stack.append(node['label'])\n #Performing transformation for calculations.\n rule=np.array(stack)\n rule.tolist()\n rules.append(rule)\n #Drawing decision atrribute value by using decision rule. \n #drawDecision(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,rule)\n stack.pop()\n elif 'attribute' in node:\n #Append attribute to stack\n stack.append(node['attribute'])\n #Iterating through attribute subnodes(attribute values)\n for subnode in node['nodes']:\n #Append subnode to stack\n stack.append(subnode)\n #Iterating through subnodes\n rulesSet(node['nodes'][subnode], stack)\n stack.pop()\n stack.pop()\n\n rulesSet(node,stack)\n rules=np.array(rules)\n decision=drawDecision(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,rules)\n Accuracy=accuracy(Y_test,decision)\n\n return Accuracy\n'''\n Drawing decision atrribute value by using decision rule\n Parameters used:\n X_test =Testing data which contains attribute values excluding decision attribute values\n Y_test =Testing data which contains decision attribute values\n attributes=It is a array of attribute values\n remaining_attributes=It is a array which contains attributes other than decision attribute.\n index_of_decision_attribute=Represents index of decision attribute in a dataset.\n node=It is a dictionary which contains decision tree\n\n'''\ndef drawDecision(X_test,Y_test,attributes,remaining_attributes,index_of_decision_attribute,rules):\n #print()\n k=0\n rule_info={}\n for rule in rules:\n\n #Calculating number of attributes in a rule array\n number_of_attributes=int((len(rule)-1)/2)\n \n #Finiding attributes in a rule\n attribute=[]\n for i in range(number_of_attributes):\n attribute.append(rule[i*2])\n\n #Finiding poisition of decision in a rule array\n decision_position=len(rule)-1\n index=[]\n \n #Gives index of attributes that are considered for decision in a decision rule\n for i in range(len(attribute)):\n count=0\n for j in range(len(remaining_attributes)):\n if remaining_attributes[j]==attribute[i]:\n break\n count+=1\n index.append(count)\n\n #Drawing attribute values in a rule\n #In every rule array,odd index shows attribute values\n values=[]\n for i in range(len(index)):\n values.append(rule[(i*2)+1]) \n \n #if j not in rule_info.keys():\n rule_info[k] = {\n 'number_of_attributes':number_of_attributes,#list(),\n 'attribute':attribute,#list(),\n 'decision_position':decision_position,#list(),\n 'index':index,#list(),\n 'values':values#list()\n }\n #rule_info[k]['number_of_attributes'].append(number_of_attributes)\n #rule_info[k]['attribute'].append(attribute)\n #rule_info[k]['decision_position'].append(decision_position)\n #rule_info[k]['index'].append(index)\n #rule_info[k]['values'].append(values)\n k+=1\n\n #Represents decision for each X_test\n decision={}\n \n #Predicting values for test data\n for i in range(len(X_test)):\n #Checking for all rules\n for j in range(len(rule_info)):\n #Used for decision making\n flag=0\n #Represents attribute values which are required for matching decision rule \n X_test_attribute_value=[] \n #Stroing X_test attribute values \n for k in rule_info[j]['index']:\n X_test_attribute_value.append(X_test[i,k])\n \n #Comparing values of X_test attributes and decision rule attribute values \n z=0\n for v in rule_info[j]['values']:\n if (X_test_attribute_value[z]==v): \n z+=1\n flag=0\n else:\n #If one attibute value is new and not matching,considering which are matching.\n if(z>=(len(rule_info[j]['values'])-1)):\n flag=0\n break\n else:\n flag=1\n break\n if(flag==0):\n #Considering rule for which Attribute values are matched\n rule=rules[j]\n decision_position=rule_info[j]['decision_position']\n #Storing results \n decision[i] = {\n 'decision':rule[decision_position]\n } \n return decision\n\n'''\n Calculating accuracy\n Parameters used:\n Y_test=Testing data which contains decision attribute values\n X_test_decision=Gobal array which represents decision attribute values drawn for X_test data \n'''\ndef accuracy(Y_test,decision):\n correct = 0\n #print(\"Y_test\"+repr(len(Y_test))+\"Decision\"+repr(len(decision)))\n for i in range(len(Y_test)):\n if Y_test[i] == decision[i]['decision']:\n #Finding correctness \n correct += 1\n return (correct/float(len(Y_test))) * 100.0 \n",
"import numpy as np\nX_test_decision = []\n<docstring token>\n\n\ndef predict(X_test, Y_test, attributes, remaining_attributes,\n index_of_decision_attribute, node):\n stack = []\n rules = []\n\n def rulesSet(node, stack):\n rule = []\n if 'label' in node:\n stack.append(node['label'])\n rule = np.array(stack)\n rule.tolist()\n rules.append(rule)\n stack.pop()\n elif 'attribute' in node:\n stack.append(node['attribute'])\n for subnode in node['nodes']:\n stack.append(subnode)\n rulesSet(node['nodes'][subnode], stack)\n stack.pop()\n stack.pop()\n rulesSet(node, stack)\n rules = np.array(rules)\n decision = drawDecision(X_test, Y_test, attributes,\n remaining_attributes, index_of_decision_attribute, rules)\n Accuracy = accuracy(Y_test, decision)\n return Accuracy\n\n\n<docstring token>\n\n\ndef drawDecision(X_test, Y_test, attributes, remaining_attributes,\n index_of_decision_attribute, rules):\n k = 0\n rule_info = {}\n for rule in rules:\n number_of_attributes = int((len(rule) - 1) / 2)\n attribute = []\n for i in range(number_of_attributes):\n attribute.append(rule[i * 2])\n decision_position = len(rule) - 1\n index = []\n for i in range(len(attribute)):\n count = 0\n for j in range(len(remaining_attributes)):\n if remaining_attributes[j] == attribute[i]:\n break\n count += 1\n index.append(count)\n values = []\n for i in range(len(index)):\n values.append(rule[i * 2 + 1])\n rule_info[k] = {'number_of_attributes': number_of_attributes,\n 'attribute': attribute, 'decision_position': decision_position,\n 'index': index, 'values': values}\n k += 1\n decision = {}\n for i in range(len(X_test)):\n for j in range(len(rule_info)):\n flag = 0\n X_test_attribute_value = []\n for k in rule_info[j]['index']:\n X_test_attribute_value.append(X_test[i, k])\n z = 0\n for v in rule_info[j]['values']:\n if X_test_attribute_value[z] == v:\n z += 1\n flag = 0\n elif z >= len(rule_info[j]['values']) - 1:\n flag = 0\n break\n else:\n flag = 1\n break\n if flag == 0:\n rule = rules[j]\n decision_position = rule_info[j]['decision_position']\n decision[i] = {'decision': rule[decision_position]}\n return decision\n\n\n<docstring token>\n\n\ndef accuracy(Y_test, decision):\n correct = 0\n for i in range(len(Y_test)):\n if Y_test[i] == decision[i]['decision']:\n correct += 1\n return correct / float(len(Y_test)) * 100.0\n",
"<import token>\nX_test_decision = []\n<docstring token>\n\n\ndef predict(X_test, Y_test, attributes, remaining_attributes,\n index_of_decision_attribute, node):\n stack = []\n rules = []\n\n def rulesSet(node, stack):\n rule = []\n if 'label' in node:\n stack.append(node['label'])\n rule = np.array(stack)\n rule.tolist()\n rules.append(rule)\n stack.pop()\n elif 'attribute' in node:\n stack.append(node['attribute'])\n for subnode in node['nodes']:\n stack.append(subnode)\n rulesSet(node['nodes'][subnode], stack)\n stack.pop()\n stack.pop()\n rulesSet(node, stack)\n rules = np.array(rules)\n decision = drawDecision(X_test, Y_test, attributes,\n remaining_attributes, index_of_decision_attribute, rules)\n Accuracy = accuracy(Y_test, decision)\n return Accuracy\n\n\n<docstring token>\n\n\ndef drawDecision(X_test, Y_test, attributes, remaining_attributes,\n index_of_decision_attribute, rules):\n k = 0\n rule_info = {}\n for rule in rules:\n number_of_attributes = int((len(rule) - 1) / 2)\n attribute = []\n for i in range(number_of_attributes):\n attribute.append(rule[i * 2])\n decision_position = len(rule) - 1\n index = []\n for i in range(len(attribute)):\n count = 0\n for j in range(len(remaining_attributes)):\n if remaining_attributes[j] == attribute[i]:\n break\n count += 1\n index.append(count)\n values = []\n for i in range(len(index)):\n values.append(rule[i * 2 + 1])\n rule_info[k] = {'number_of_attributes': number_of_attributes,\n 'attribute': attribute, 'decision_position': decision_position,\n 'index': index, 'values': values}\n k += 1\n decision = {}\n for i in range(len(X_test)):\n for j in range(len(rule_info)):\n flag = 0\n X_test_attribute_value = []\n for k in rule_info[j]['index']:\n X_test_attribute_value.append(X_test[i, k])\n z = 0\n for v in rule_info[j]['values']:\n if X_test_attribute_value[z] == v:\n z += 1\n flag = 0\n elif z >= len(rule_info[j]['values']) - 1:\n flag = 0\n break\n else:\n flag = 1\n break\n if flag == 0:\n rule = rules[j]\n decision_position = rule_info[j]['decision_position']\n decision[i] = {'decision': rule[decision_position]}\n return decision\n\n\n<docstring token>\n\n\ndef accuracy(Y_test, decision):\n correct = 0\n for i in range(len(Y_test)):\n if Y_test[i] == decision[i]['decision']:\n correct += 1\n return correct / float(len(Y_test)) * 100.0\n",
"<import token>\n<assignment token>\n<docstring token>\n\n\ndef predict(X_test, Y_test, attributes, remaining_attributes,\n index_of_decision_attribute, node):\n stack = []\n rules = []\n\n def rulesSet(node, stack):\n rule = []\n if 'label' in node:\n stack.append(node['label'])\n rule = np.array(stack)\n rule.tolist()\n rules.append(rule)\n stack.pop()\n elif 'attribute' in node:\n stack.append(node['attribute'])\n for subnode in node['nodes']:\n stack.append(subnode)\n rulesSet(node['nodes'][subnode], stack)\n stack.pop()\n stack.pop()\n rulesSet(node, stack)\n rules = np.array(rules)\n decision = drawDecision(X_test, Y_test, attributes,\n remaining_attributes, index_of_decision_attribute, rules)\n Accuracy = accuracy(Y_test, decision)\n return Accuracy\n\n\n<docstring token>\n\n\ndef drawDecision(X_test, Y_test, attributes, remaining_attributes,\n index_of_decision_attribute, rules):\n k = 0\n rule_info = {}\n for rule in rules:\n number_of_attributes = int((len(rule) - 1) / 2)\n attribute = []\n for i in range(number_of_attributes):\n attribute.append(rule[i * 2])\n decision_position = len(rule) - 1\n index = []\n for i in range(len(attribute)):\n count = 0\n for j in range(len(remaining_attributes)):\n if remaining_attributes[j] == attribute[i]:\n break\n count += 1\n index.append(count)\n values = []\n for i in range(len(index)):\n values.append(rule[i * 2 + 1])\n rule_info[k] = {'number_of_attributes': number_of_attributes,\n 'attribute': attribute, 'decision_position': decision_position,\n 'index': index, 'values': values}\n k += 1\n decision = {}\n for i in range(len(X_test)):\n for j in range(len(rule_info)):\n flag = 0\n X_test_attribute_value = []\n for k in rule_info[j]['index']:\n X_test_attribute_value.append(X_test[i, k])\n z = 0\n for v in rule_info[j]['values']:\n if X_test_attribute_value[z] == v:\n z += 1\n flag = 0\n elif z >= len(rule_info[j]['values']) - 1:\n flag = 0\n break\n else:\n flag = 1\n break\n if flag == 0:\n rule = rules[j]\n decision_position = rule_info[j]['decision_position']\n decision[i] = {'decision': rule[decision_position]}\n return decision\n\n\n<docstring token>\n\n\ndef accuracy(Y_test, decision):\n correct = 0\n for i in range(len(Y_test)):\n if Y_test[i] == decision[i]['decision']:\n correct += 1\n return correct / float(len(Y_test)) * 100.0\n",
"<import token>\n<assignment token>\n<docstring token>\n<function token>\n<docstring token>\n\n\ndef drawDecision(X_test, Y_test, attributes, remaining_attributes,\n index_of_decision_attribute, rules):\n k = 0\n rule_info = {}\n for rule in rules:\n number_of_attributes = int((len(rule) - 1) / 2)\n attribute = []\n for i in range(number_of_attributes):\n attribute.append(rule[i * 2])\n decision_position = len(rule) - 1\n index = []\n for i in range(len(attribute)):\n count = 0\n for j in range(len(remaining_attributes)):\n if remaining_attributes[j] == attribute[i]:\n break\n count += 1\n index.append(count)\n values = []\n for i in range(len(index)):\n values.append(rule[i * 2 + 1])\n rule_info[k] = {'number_of_attributes': number_of_attributes,\n 'attribute': attribute, 'decision_position': decision_position,\n 'index': index, 'values': values}\n k += 1\n decision = {}\n for i in range(len(X_test)):\n for j in range(len(rule_info)):\n flag = 0\n X_test_attribute_value = []\n for k in rule_info[j]['index']:\n X_test_attribute_value.append(X_test[i, k])\n z = 0\n for v in rule_info[j]['values']:\n if X_test_attribute_value[z] == v:\n z += 1\n flag = 0\n elif z >= len(rule_info[j]['values']) - 1:\n flag = 0\n break\n else:\n flag = 1\n break\n if flag == 0:\n rule = rules[j]\n decision_position = rule_info[j]['decision_position']\n decision[i] = {'decision': rule[decision_position]}\n return decision\n\n\n<docstring token>\n\n\ndef accuracy(Y_test, decision):\n correct = 0\n for i in range(len(Y_test)):\n if Y_test[i] == decision[i]['decision']:\n correct += 1\n return correct / float(len(Y_test)) * 100.0\n",
"<import token>\n<assignment token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n\n\ndef accuracy(Y_test, decision):\n correct = 0\n for i in range(len(Y_test)):\n if Y_test[i] == decision[i]['decision']:\n correct += 1\n return correct / float(len(Y_test)) * 100.0\n",
"<import token>\n<assignment token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n"
] | false |
99,688 |
7eb3cbac5c96165c8dc643d11288591ecee10a99
|
sx, sy, gx, gy = map(int,input().split())
dx = gx - sx
dy = gy - sy
ans = 'U'*dy + 'R'*dx
ans += 'D'*dy + 'L'*dx
ans += 'L' + 'U'*(dy+1) + 'R'*(dx+1) + 'D'
ans += 'R' + 'D'*(dy+1) + 'L'*(dx+1) + 'U'
print(ans)
|
[
"sx, sy, gx, gy = map(int,input().split())\ndx = gx - sx\ndy = gy - sy\n\nans = 'U'*dy + 'R'*dx\nans += 'D'*dy + 'L'*dx\nans += 'L' + 'U'*(dy+1) + 'R'*(dx+1) + 'D'\nans += 'R' + 'D'*(dy+1) + 'L'*(dx+1) + 'U'\n\nprint(ans)",
"sx, sy, gx, gy = map(int, input().split())\ndx = gx - sx\ndy = gy - sy\nans = 'U' * dy + 'R' * dx\nans += 'D' * dy + 'L' * dx\nans += 'L' + 'U' * (dy + 1) + 'R' * (dx + 1) + 'D'\nans += 'R' + 'D' * (dy + 1) + 'L' * (dx + 1) + 'U'\nprint(ans)\n",
"<assignment token>\nans += 'D' * dy + 'L' * dx\nans += 'L' + 'U' * (dy + 1) + 'R' * (dx + 1) + 'D'\nans += 'R' + 'D' * (dy + 1) + 'L' * (dx + 1) + 'U'\nprint(ans)\n",
"<assignment token>\n<code token>\n"
] | false |
99,689 |
e9fd319f1356a991c73fd7c47f0d24bd6c139b43
|
a = input()
dizi = a.split()
result = []
def karsilastir(kelime1, kelime2):
kelime1dizi = [0] * 300
kelime2dizi = [0] * 300
for i in kelime1:
kelime1dizi[ord(i) - ord('a')] += 1
for i in kelime2:
kelime2dizi[ord(i) - ord('a')] += 1
result = True
for i in range (0, 25):
if(kelime1dizi[i] != kelime2dizi[i]):
result = False
return result
for i in range(1, len(dizi)):
for j in range(0, i):
if(karsilastir(dizi[i], dizi[j])):
if(dizi[i] != dizi[j]):
result.append((dizi[i], dizi[j]))
print(result)
|
[
"a = input()\n\ndizi = a.split()\n\nresult = []\n\ndef karsilastir(kelime1, kelime2):\n kelime1dizi = [0] * 300\n kelime2dizi = [0] * 300\n for i in kelime1:\n kelime1dizi[ord(i) - ord('a')] += 1\n for i in kelime2:\n kelime2dizi[ord(i) - ord('a')] += 1\n result = True\n for i in range (0, 25):\n if(kelime1dizi[i] != kelime2dizi[i]):\n result = False\n return result\n\nfor i in range(1, len(dizi)):\n for j in range(0, i):\n if(karsilastir(dizi[i], dizi[j])):\n if(dizi[i] != dizi[j]):\n result.append((dizi[i], dizi[j]))\n\nprint(result)\n",
"a = input()\ndizi = a.split()\nresult = []\n\n\ndef karsilastir(kelime1, kelime2):\n kelime1dizi = [0] * 300\n kelime2dizi = [0] * 300\n for i in kelime1:\n kelime1dizi[ord(i) - ord('a')] += 1\n for i in kelime2:\n kelime2dizi[ord(i) - ord('a')] += 1\n result = True\n for i in range(0, 25):\n if kelime1dizi[i] != kelime2dizi[i]:\n result = False\n return result\n\n\nfor i in range(1, len(dizi)):\n for j in range(0, i):\n if karsilastir(dizi[i], dizi[j]):\n if dizi[i] != dizi[j]:\n result.append((dizi[i], dizi[j]))\nprint(result)\n",
"<assignment token>\n\n\ndef karsilastir(kelime1, kelime2):\n kelime1dizi = [0] * 300\n kelime2dizi = [0] * 300\n for i in kelime1:\n kelime1dizi[ord(i) - ord('a')] += 1\n for i in kelime2:\n kelime2dizi[ord(i) - ord('a')] += 1\n result = True\n for i in range(0, 25):\n if kelime1dizi[i] != kelime2dizi[i]:\n result = False\n return result\n\n\nfor i in range(1, len(dizi)):\n for j in range(0, i):\n if karsilastir(dizi[i], dizi[j]):\n if dizi[i] != dizi[j]:\n result.append((dizi[i], dizi[j]))\nprint(result)\n",
"<assignment token>\n\n\ndef karsilastir(kelime1, kelime2):\n kelime1dizi = [0] * 300\n kelime2dizi = [0] * 300\n for i in kelime1:\n kelime1dizi[ord(i) - ord('a')] += 1\n for i in kelime2:\n kelime2dizi[ord(i) - ord('a')] += 1\n result = True\n for i in range(0, 25):\n if kelime1dizi[i] != kelime2dizi[i]:\n result = False\n return result\n\n\n<code token>\n",
"<assignment token>\n<function token>\n<code token>\n"
] | false |
99,690 |
6f57544600ee7c529f804deba801a2c8c0712e8d
|
from selenium import webdriver
#指定ChromeDriver路徑(driver下載: https://sites.google.com/a/chromium.org/chromedriver/downloads)
driver = webdriver.Chrome("C:\selenium_driver_chrome\chromedriver.exe")
#開啟&取得指定頁面
driver.get("https://m.sportslottery.com.tw/zh/home")
#用Xpath找尋指定的屬性元素 然後輸入文字
#driver.find_element_by_xpath("//input[@id='lst-ib']").send_keys("天氣")
#用Xpath找尋指定的屬性元素 然後點擊
#driver.find_element_by_xpath("//input[@jsaction='sf.chk']").submit()
|
[
"from selenium import webdriver\n\n#指定ChromeDriver路徑(driver下載: https://sites.google.com/a/chromium.org/chromedriver/downloads)\ndriver = webdriver.Chrome(\"C:\\selenium_driver_chrome\\chromedriver.exe\")\n\n#開啟&取得指定頁面\ndriver.get(\"https://m.sportslottery.com.tw/zh/home\")\n\n#用Xpath找尋指定的屬性元素 然後輸入文字\n#driver.find_element_by_xpath(\"//input[@id='lst-ib']\").send_keys(\"天氣\")\n\n#用Xpath找尋指定的屬性元素 然後點擊\n#driver.find_element_by_xpath(\"//input[@jsaction='sf.chk']\").submit()\n",
"from selenium import webdriver\ndriver = webdriver.Chrome('C:\\\\selenium_driver_chrome\\\\chromedriver.exe')\ndriver.get('https://m.sportslottery.com.tw/zh/home')\n",
"<import token>\ndriver = webdriver.Chrome('C:\\\\selenium_driver_chrome\\\\chromedriver.exe')\ndriver.get('https://m.sportslottery.com.tw/zh/home')\n",
"<import token>\n<assignment token>\ndriver.get('https://m.sportslottery.com.tw/zh/home')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,691 |
1cfce7c24b27d810106dd0cfcd6a68dd0aefb0ca
|
class Solution(object):
def checkVaild(self, i, j, visited, index):
if i < 0 or i >= self.m or j < 0 or j >= self.n or visited[i][j] == 1:
return False
if self.board[i][j] != self.word[index]:
return False
visited[i][j] = 1
if index == len(self.word) - 1:
return True
## check four directions
res = self.checkVaild(i - 1, j, visited, index + 1) or self.checkVaild(i + 1, j, visited, index + 1) \
or self.checkVaild(i, j - 1, visited, index + 1) or self.checkVaild(i, j + 1, visited, index + 1)
if res == True:
return True
else:
visited[i][j] = 0
return False
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
self.board = board
self.word = word
self.m = len(board)
self.n = len(board[0])
self.k = len(word)
if self.m <= 0 or len(word) <= 0 or self.k > self.m * self.n:
return False
wordList = {}
tarWordList = {}
visited = [[0 for i in range(self.n)] for j in range(self.m)]
for i in range(self.m):
for j in range(self.n):
if wordList.get(board[i][j], False) == False:
wordList[board[i][j]] = []
wordList[board[i][j]].append(i * self.n + j)
for ind in range(self.k):
if tarWordList.get(word[ind], False) == False:
tarWordList[word[ind]] = 1
else:
tarWordList[word[ind]] += 1
if wordList.get(word[0], False) == False:
return False
for char, cnt in tarWordList.items():
if wordList.get(char, False) == False or len(wordList[char]) < cnt:
return False
for pos in wordList[word[0]]:
startI = pos / self.n
startJ = pos % self.n
if self.checkVaild(startI, startJ, visited, 0) == True:
return True
return False
|
[
"class Solution(object):\n def checkVaild(self, i, j, visited, index):\n if i < 0 or i >= self.m or j < 0 or j >= self.n or visited[i][j] == 1:\n return False\n if self.board[i][j] != self.word[index]:\n return False\n visited[i][j] = 1\n if index == len(self.word) - 1:\n return True\n ## check four directions\n res = self.checkVaild(i - 1, j, visited, index + 1) or self.checkVaild(i + 1, j, visited, index + 1) \\\n or self.checkVaild(i, j - 1, visited, index + 1) or self.checkVaild(i, j + 1, visited, index + 1)\n if res == True:\n return True\n else:\n visited[i][j] = 0\n return False\n \n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n self.board = board\n self.word = word\n self.m = len(board)\n self.n = len(board[0])\n self.k = len(word)\n if self.m <= 0 or len(word) <= 0 or self.k > self.m * self.n:\n return False\n wordList = {}\n tarWordList = {}\n visited = [[0 for i in range(self.n)] for j in range(self.m)]\n for i in range(self.m):\n for j in range(self.n):\n if wordList.get(board[i][j], False) == False:\n wordList[board[i][j]] = []\n wordList[board[i][j]].append(i * self.n + j)\n for ind in range(self.k):\n if tarWordList.get(word[ind], False) == False:\n tarWordList[word[ind]] = 1\n else:\n tarWordList[word[ind]] += 1\n \n if wordList.get(word[0], False) == False:\n return False\n for char, cnt in tarWordList.items():\n if wordList.get(char, False) == False or len(wordList[char]) < cnt:\n return False\n for pos in wordList[word[0]]:\n startI = pos / self.n\n startJ = pos % self.n\n if self.checkVaild(startI, startJ, visited, 0) == True:\n return True\n return False",
"class Solution(object):\n\n def checkVaild(self, i, j, visited, index):\n if i < 0 or i >= self.m or j < 0 or j >= self.n or visited[i][j] == 1:\n return False\n if self.board[i][j] != self.word[index]:\n return False\n visited[i][j] = 1\n if index == len(self.word) - 1:\n return True\n res = self.checkVaild(i - 1, j, visited, index + 1) or self.checkVaild(\n i + 1, j, visited, index + 1) or self.checkVaild(i, j - 1,\n visited, index + 1) or self.checkVaild(i, j + 1, visited, index + 1\n )\n if res == True:\n return True\n else:\n visited[i][j] = 0\n return False\n\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n self.board = board\n self.word = word\n self.m = len(board)\n self.n = len(board[0])\n self.k = len(word)\n if self.m <= 0 or len(word) <= 0 or self.k > self.m * self.n:\n return False\n wordList = {}\n tarWordList = {}\n visited = [[(0) for i in range(self.n)] for j in range(self.m)]\n for i in range(self.m):\n for j in range(self.n):\n if wordList.get(board[i][j], False) == False:\n wordList[board[i][j]] = []\n wordList[board[i][j]].append(i * self.n + j)\n for ind in range(self.k):\n if tarWordList.get(word[ind], False) == False:\n tarWordList[word[ind]] = 1\n else:\n tarWordList[word[ind]] += 1\n if wordList.get(word[0], False) == False:\n return False\n for char, cnt in tarWordList.items():\n if wordList.get(char, False) == False or len(wordList[char]) < cnt:\n return False\n for pos in wordList[word[0]]:\n startI = pos / self.n\n startJ = pos % self.n\n if self.checkVaild(startI, startJ, visited, 0) == True:\n return True\n return False\n",
"class Solution(object):\n\n def checkVaild(self, i, j, visited, index):\n if i < 0 or i >= self.m or j < 0 or j >= self.n or visited[i][j] == 1:\n return False\n if self.board[i][j] != self.word[index]:\n return False\n visited[i][j] = 1\n if index == len(self.word) - 1:\n return True\n res = self.checkVaild(i - 1, j, visited, index + 1) or self.checkVaild(\n i + 1, j, visited, index + 1) or self.checkVaild(i, j - 1,\n visited, index + 1) or self.checkVaild(i, j + 1, visited, index + 1\n )\n if res == True:\n return True\n else:\n visited[i][j] = 0\n return False\n <function token>\n",
"class Solution(object):\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
99,692 |
d5e2bf42bde098533049b3eb9b890ee03f45e9d1
|
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from ultimatereview.forms import UserForm, UserProfileForm, UpdateProfileForm
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from models import Review, Researcher, Query, Paper
import search
import datetime
import json
def index(request):
# Construct a dictionary to pass to the template engine as its context.
# Note the key boldmessage is the same as {{ boldmessage }} in the template!
context_dict = {'boldmessage': "I am bold font from the context"}
# Return a rendered response to send to the client.
# We make use of the shortcut function to make our lives easier.
# Note that the first parameter is the template we wish to use.
return render(request, 'ultimatereview/index.html', context_dict)
@login_required
def myprofile(request):
user = request.user
form = UserForm(initial={'username':user.username, 'email':user.email, 'password':user.password})
if request.method == 'POST':
user.username = request.POST['username']
user.email = request.POST['email']
if request.POST['password'] != "": # Checking for an empty password field.
user.set_password(request.POST['password']) # If password is not empty, then set a new password.
user.save() # All changes are saved.
# Now display the updated form details.
form = UserForm(initial={'username':user.username, 'email':user.email, 'password':user.password})
context = {
"form": form
}
return render(request, 'ultimatereview/myprofile.html', context)
def register(request):
# A boolean value for telling the template whether the registration was successful.
# Set to False initially. Code changes value to True when registration succeeds.
registered = False
# If it's a HTTP POST, we're interested in processing form data.
if request.method == 'POST':
# Attempt to grab information from the raw form information.
# Note that we make use of both UserForm and UserProfileForm.
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
# If the two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# Save the user's form data to the database.
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
# Now sort out the UserProfile instance.
# Since we need to set the user attribute ourselves, we set commit=False.
# This delays saving the model until we're ready to avoid integrity problems.
profile = profile_form.save(commit=False)
profile.user = user
# Did the user provide a profile picture?
# If so, we need to get it from the input form and put it in the UserProfile model.
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
# Now we save the UserProfile model instance.
profile.save()
# Update our variable to tell the template registration was successful.
registered = True
# Invalid form or forms - mistakes or something else?
# Print problems to the terminal.
# They'll also be shown to the user.
else:
print user_form.errors, profile_form.errors
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
else:
user_form = UserForm()
profile_form = UserProfileForm()
# Render the template depending on the context.
return render(request,
'ultimatereview/register.html',
{'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
def user_login(request):
# If the request is a HTTP POST, try to pull out the relevant information.
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
# We use request.POST.get('<variable>') as opposed to request.POST['<variable>'],
# because the request.POST.get('<variable>') returns None, if the value does not exist,
# while the request.POST['<variable>'] will raise key error exception
username = request.POST.get('username')
password = request.POST.get('password')
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value), no user
# with matching credentials was found.
if user:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
return HttpResponseRedirect('/ultimatereview/')
else:
# An inactive account was used - no logging in!
return HttpResponse("Your Ultimate Review account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
return HttpResponse("Invalid login details supplied.")
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the
# blank dictionary object...
return render(request, 'ultimatereview/login.html', {})
@login_required
def myreviews(request):
reviews = Review.objects.filter(user=request.user).order_by('-date_started')
if request.method == "POST":
review = Review.objects.create(user=request.user, title=request.POST.get('review', ""), date_started=datetime.datetime.now())
review.save()
reviews = Review.objects.filter(user=request.user).order_by('-date_started')
context = {
'reviews':reviews
}
return render(request, 'ultimatereview/myreviews.html', context)
@login_required
def single_review(request, review_name_slug):
context_dict = {}
try:
# Can we find a review name slug with the given name?
# If we can't, the .get() method raises a DoesNotExist exception.
# So the .get() method returns one model instance or raises an exception.
review = Review.objects.get(slug=review_name_slug)
context_dict['review_title'] = review.title
# Retrieve all of the associated pages.
# Note that filter returns >= 1 model instance.
queries = Query.objects.filter(review=review)
# Adds our results list to the template context under name pages.
context_dict['queries'] = queries
# We also add the category object from the database to the context dictionary.
# We'll use this in the template to verify that the category exists.
context_dict['review'] = review
except Review.DoesNotExist:
pass
return render(request, 'ultimatereview/querybuilder.html', context_dict)
@login_required
def abstractPool(request):
if request.method == "GET":
query = request.GET['query']
print query
return HttpResponse("Query is: " + query)
@login_required
def user_logout(request):
# Since we know the user is logged in, we can now just log them out.
logout(request)
# Take the user back to the homepage.
return HttpResponseRedirect('/ultimatereview/')
def indexQueried(request):
if request.method == "POST":
query = request.POST["queryField"]
abstractList = search.main(query,"relevance","5")
return HttpResponse(abstractList)
|
[
"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom ultimatereview.forms import UserForm, UserProfileForm, UpdateProfileForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom models import Review, Researcher, Query, Paper\nimport search\nimport datetime\nimport json\n\ndef index(request):\n\n # Construct a dictionary to pass to the template engine as its context.\n # Note the key boldmessage is the same as {{ boldmessage }} in the template!\n context_dict = {'boldmessage': \"I am bold font from the context\"}\n\n # Return a rendered response to send to the client.\n # We make use of the shortcut function to make our lives easier.\n # Note that the first parameter is the template we wish to use.\n\n return render(request, 'ultimatereview/index.html', context_dict)\n\n@login_required\ndef myprofile(request):\n user = request.user\n form = UserForm(initial={'username':user.username, 'email':user.email, 'password':user.password})\n if request.method == 'POST':\n\n user.username = request.POST['username']\n user.email = request.POST['email']\n if request.POST['password'] != \"\": # Checking for an empty password field.\n user.set_password(request.POST['password']) # If password is not empty, then set a new password.\n\n user.save() # All changes are saved.\n\n # Now display the updated form details.\n form = UserForm(initial={'username':user.username, 'email':user.email, 'password':user.password})\n context = {\n \"form\": form\n }\n return render(request, 'ultimatereview/myprofile.html', context)\n\ndef register(request):\n\n # A boolean value for telling the template whether the registration was successful.\n # Set to False initially. Code changes value to True when registration succeeds.\n registered = False\n\n # If it's a HTTP POST, we're interested in processing form data.\n if request.method == 'POST':\n # Attempt to grab information from the raw form information.\n # Note that we make use of both UserForm and UserProfileForm.\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileForm(data=request.POST)\n\n # If the two forms are valid...\n if user_form.is_valid() and profile_form.is_valid():\n # Save the user's form data to the database.\n user = user_form.save()\n\n # Now we hash the password with the set_password method.\n # Once hashed, we can update the user object.\n user.set_password(user.password)\n user.save()\n\n # Now sort out the UserProfile instance.\n # Since we need to set the user attribute ourselves, we set commit=False.\n # This delays saving the model until we're ready to avoid integrity problems.\n profile = profile_form.save(commit=False)\n profile.user = user\n\n # Did the user provide a profile picture?\n # If so, we need to get it from the input form and put it in the UserProfile model.\n if 'picture' in request.FILES:\n profile.picture = request.FILES['picture']\n\n # Now we save the UserProfile model instance.\n profile.save()\n\n # Update our variable to tell the template registration was successful.\n registered = True\n\n # Invalid form or forms - mistakes or something else?\n # Print problems to the terminal.\n # They'll also be shown to the user.\n else:\n print user_form.errors, profile_form.errors\n\n # Not a HTTP POST, so we render our form using two ModelForm instances.\n # These forms will be blank, ready for user input.\n else:\n user_form = UserForm()\n profile_form = UserProfileForm()\n\n # Render the template depending on the context.\n return render(request,\n 'ultimatereview/register.html',\n {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )\n\ndef user_login(request):\n\n # If the request is a HTTP POST, try to pull out the relevant information.\n if request.method == 'POST':\n # Gather the username and password provided by the user.\n # This information is obtained from the login form.\n # We use request.POST.get('<variable>') as opposed to request.POST['<variable>'],\n # because the request.POST.get('<variable>') returns None, if the value does not exist,\n # while the request.POST['<variable>'] will raise key error exception\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n # Use Django's machinery to attempt to see if the username/password\n # combination is valid - a User object is returned if it is.\n user = authenticate(username=username, password=password)\n\n # If we have a User object, the details are correct.\n # If None (Python's way of representing the absence of a value), no user\n # with matching credentials was found.\n if user:\n # Is the account active? It could have been disabled.\n if user.is_active:\n # If the account is valid and active, we can log the user in.\n # We'll send the user back to the homepage.\n login(request, user)\n return HttpResponseRedirect('/ultimatereview/')\n else:\n # An inactive account was used - no logging in!\n return HttpResponse(\"Your Ultimate Review account is disabled.\")\n else:\n # Bad login details were provided. So we can't log the user in.\n print \"Invalid login details: {0}, {1}\".format(username, password)\n return HttpResponse(\"Invalid login details supplied.\")\n\n # The request is not a HTTP POST, so display the login form.\n # This scenario would most likely be a HTTP GET.\n else:\n # No context variables to pass to the template system, hence the\n # blank dictionary object...\n return render(request, 'ultimatereview/login.html', {})\n\n@login_required\ndef myreviews(request):\n reviews = Review.objects.filter(user=request.user).order_by('-date_started')\n if request.method == \"POST\":\n review = Review.objects.create(user=request.user, title=request.POST.get('review', \"\"), date_started=datetime.datetime.now())\n review.save()\n reviews = Review.objects.filter(user=request.user).order_by('-date_started')\n context = {\n 'reviews':reviews\n }\n return render(request, 'ultimatereview/myreviews.html', context)\n\n@login_required\ndef single_review(request, review_name_slug):\n context_dict = {}\n\n try:\n # Can we find a review name slug with the given name?\n # If we can't, the .get() method raises a DoesNotExist exception.\n # So the .get() method returns one model instance or raises an exception.\n review = Review.objects.get(slug=review_name_slug)\n context_dict['review_title'] = review.title\n\n # Retrieve all of the associated pages.\n # Note that filter returns >= 1 model instance.\n queries = Query.objects.filter(review=review)\n\n # Adds our results list to the template context under name pages.\n context_dict['queries'] = queries\n # We also add the category object from the database to the context dictionary.\n # We'll use this in the template to verify that the category exists.\n context_dict['review'] = review\n except Review.DoesNotExist:\n pass\n return render(request, 'ultimatereview/querybuilder.html', context_dict)\n\n@login_required\ndef abstractPool(request):\n if request.method == \"GET\":\n query = request.GET['query']\n print query\n return HttpResponse(\"Query is: \" + query)\n\n@login_required\ndef user_logout(request):\n # Since we know the user is logged in, we can now just log them out.\n logout(request)\n\n # Take the user back to the homepage.\n return HttpResponseRedirect('/ultimatereview/')\n\ndef indexQueried(request):\n if request.method == \"POST\":\n query = request.POST[\"queryField\"]\n abstractList = search.main(query,\"relevance\",\"5\")\n return HttpResponse(abstractList)\n"
] | true |
99,693 |
93e7d0026b9851931cb0152ad397ae4c457dfa4a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 07:20:08 2019
This code is used for testing the noise2self code on our medical images.
@author: yxw
"""
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from util import show, plot_images, plot_tensors
from mask import Masker
from models.babyunet import BabyUnet
from torch.nn import MSELoss
from torch.optim import Adam
from PIL import Image, ImageOps
from util_data import tensor_to_image
import time
class XrayDataset(Dataset):
# this class is used to read the x-ray dataset after format transform
def __init__(self, data_dir, mode='train', tsfm=None):
#tsfm stands for transform.
self.data_dir = data_dir
# load the image file names in the data folder.
self.img_list = os.listdir(data_dir)
#print(len(self.img_list))
# train or test mode
self.mode = mode
# transform is process of the loaded image
self.transform = tsfm
def __len__(self):
# for the number of image data.
return len(self.img_list)
def __getitem__(self, idx):
# get idx th image.
img_name = os.path.join(self.data_dir, self.img_list[idx])
image = Image.open(img_name)
#image = io.imread(img_name)
if self.mode == 'train':
# hard coded 2^n padding. not a good way.
image = pad_to_target(image, 256, 256, label=0)
else:
image = pad_to_target(image, 1024, 1024, label=0)
#print(image.size)
if self.transform:
image = self.transform(image)
sample = {'image': image}
return sample
def pad_to_target(img, target_height, target_width, label=0):
# Pad image with zeros to the specified height and width if needed
# This op does nothing if the image already has size bigger than target_height and target_width.
w, h = img.size
left = top = right = bottom = 0
doit = False
if target_width > w:
delta = target_width - w
left = delta // 2
right = delta - left
doit = True
if target_height > h:
delta = target_height - h
top = delta // 2
bottom = delta - top
doit = True
#print(img.size)
if doit:
img = ImageOps.expand(img, border=(left, top, right, bottom), fill=label)
#print(img.size)
assert img.size[0] >= target_width
assert img.size[1] >= target_height
return img
if __name__ == "__main__":
# set up parameter
use_gpu = True
# prepare dataset.
tsfm = transforms.Compose([transforms.RandomCrop(size=256, pad_if_needed=True),\
transforms.ToTensor()])
# tsfm = transforms.Compose([transforms.ToTensor()])
noisy_mnist_train = XrayDataset('data/LowQ_digest_train', mode='train', tsfm=tsfm)
# initialize mask for J-invariant fuc
masker = Masker(width = 4, mode='interpolate')
# initialize network
model = BabyUnet()
if use_gpu:
model.cuda()
# set loss function
loss_function = MSELoss()
# set optimizer
optimizer = Adam(model.parameters(), lr=0.001)
# train the model
data_loader = DataLoader(noisy_mnist_train, batch_size=32, shuffle=False, num_workers=4)
# set a count number to get different mask idx
count_batch = 0
# train the network
num_epoch = 51
# load a single image for test
tsfm = transforms.ToTensor()
noisy_mnist_test_in_sample = XrayDataset('data/LowQ_digest_train', mode='test', tsfm=tsfm)
noisy_mnist_test_out_sample = XrayDataset('data/LowQ_digest_test', mode='test', tsfm=tsfm)
test_data_loader_in_sample = DataLoader(noisy_mnist_test_in_sample, batch_size=1, shuffle=False, num_workers=4)
test_data_loader_out_sample = DataLoader(noisy_mnist_test_out_sample, batch_size=1, shuffle=False, num_workers=4)
i, test_batch_in_sample = next(enumerate(test_data_loader_in_sample))
j, test_batch_out_sample = next(enumerate(test_data_loader_out_sample))
noisy_in_sample = test_batch_in_sample['image']
noisy_out_sample = test_batch_out_sample['image']
if use_gpu:
noisy_in_sample = noisy_in_sample.cuda()
noisy_out_sample = noisy_out_sample.cuda()
tic = time.time()
for epoch_idx in range(num_epoch):
for i, batch in enumerate(data_loader):
model.train()
noisy_images = batch['image']
net_input, mask = masker.mask(noisy_images, count_batch)
if use_gpu:
noisy_images = noisy_images.cuda()
net_input = net_input.cuda()
mask = mask.cuda()
net_output = model(net_input)
# only use the masked pixel to calculate loss.
loss = loss_function(net_output*mask, noisy_images*mask)
optimizer.zero_grad()
loss.backward()
optimizer.step()
count_batch += 1
print('number of batch:',count_batch)
if i % 1 == 0:
print("Loss (", i, "): \t", round(loss.item(), 8))
if i == 100:
break
if epoch_idx % 5 == 0:
model.eval()
# calculate the denoise result on test set.
simple_output = model(noisy_in_sample)
plot_tensors([noisy_in_sample[0], simple_output[0]],["Noisy Image", "Single Pass Inference"], plot=False,\
save=True, img_dir='babyUnet_denoise_in_sample/', img_name='Epoch_'+str(epoch_idx))
simple_output = model(noisy_out_sample)
plot_tensors([noisy_out_sample[0], simple_output[0]],["Noisy Image", "Single Pass Inference"], plot=False,\
save=True, img_dir='babyUnet_denoise_out_sample/', img_name='Epoch_'+str(epoch_idx))
# save the model
torch.save(model.state_dict(), 'BabyUnet_denoise.pth')
torch.cuda.empty_cache()
toc = time.time()
print('Run Time:{}s'.format(toc-tic))
#%% test the model on test image.
#noisy_mnist_test = XrayDataset('data/LowQ_digest_test', mode='test', tsfm=tsfm)
# test_data_loader = DataLoader(noisy_mnist_test, batch_size=32, shuffle=False, num_workers=4)
# i, test_batch = next(enumerate(test_data_loader))
# noisy = test_batch['image']
# model.eval()
# # calculate the denoise result on test set.
# simple_output = model(noisy)
# #model.eval()
# #invariant_output = masker.infer_full_image(noisy, model)
# idx = 3
# plot_tensors([noisy[idx], simple_output[idx]],\
# ["Noisy Image", "Single Pass Inference"])
|
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 2 07:20:08 2019\nThis code is used for testing the noise2self code on our medical images.\n@author: yxw\n\"\"\"\n\nfrom __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom util import show, plot_images, plot_tensors\nfrom mask import Masker\nfrom models.babyunet import BabyUnet\nfrom torch.nn import MSELoss\nfrom torch.optim import Adam\nfrom PIL import Image, ImageOps\nfrom util_data import tensor_to_image\nimport time\n\n\nclass XrayDataset(Dataset):\n # this class is used to read the x-ray dataset after format transform\n def __init__(self, data_dir, mode='train', tsfm=None):\n #tsfm stands for transform. \n \n self.data_dir = data_dir\n # load the image file names in the data folder.\n self.img_list = os.listdir(data_dir)\n \n \n #print(len(self.img_list))\n # train or test mode\n self.mode = mode\n # transform is process of the loaded image\n self.transform = tsfm\n \n def __len__(self):\n # for the number of image data.\n return len(self.img_list)\n \n def __getitem__(self, idx):\n # get idx th image.\n img_name = os.path.join(self.data_dir, self.img_list[idx])\n image = Image.open(img_name)\n \n #image = io.imread(img_name) \n if self.mode == 'train':\n # hard coded 2^n padding. not a good way.\n image = pad_to_target(image, 256, 256, label=0)\n else:\n image = pad_to_target(image, 1024, 1024, label=0)\n #print(image.size)\n if self.transform:\n image = self.transform(image)\n sample = {'image': image} \n \n return sample\n \ndef pad_to_target(img, target_height, target_width, label=0):\n # Pad image with zeros to the specified height and width if needed\n # This op does nothing if the image already has size bigger than target_height and target_width.\n w, h = img.size\n left = top = right = bottom = 0\n doit = False\n if target_width > w:\n delta = target_width - w\n left = delta // 2\n right = delta - left\n doit = True\n if target_height > h:\n delta = target_height - h\n top = delta // 2\n bottom = delta - top\n doit = True\n #print(img.size)\n if doit:\n img = ImageOps.expand(img, border=(left, top, right, bottom), fill=label)\n #print(img.size)\n assert img.size[0] >= target_width\n assert img.size[1] >= target_height\n return img \n\n\nif __name__ == \"__main__\":\n \n # set up parameter\n use_gpu = True\n \n \n \n # prepare dataset.\n tsfm = transforms.Compose([transforms.RandomCrop(size=256, pad_if_needed=True),\\\n transforms.ToTensor()])\n# tsfm = transforms.Compose([transforms.ToTensor()])\n noisy_mnist_train = XrayDataset('data/LowQ_digest_train', mode='train', tsfm=tsfm)\n \n # initialize mask for J-invariant fuc\n masker = Masker(width = 4, mode='interpolate')\n \n # initialize network\n model = BabyUnet()\n if use_gpu:\n model.cuda()\n \n # set loss function\n loss_function = MSELoss()\n \n # set optimizer\n optimizer = Adam(model.parameters(), lr=0.001)\n \n # train the model\n data_loader = DataLoader(noisy_mnist_train, batch_size=32, shuffle=False, num_workers=4)\n # set a count number to get different mask idx\n count_batch = 0\n \n # train the network\n num_epoch = 51\n \n \n # load a single image for test\n tsfm = transforms.ToTensor()\n noisy_mnist_test_in_sample = XrayDataset('data/LowQ_digest_train', mode='test', tsfm=tsfm)\n noisy_mnist_test_out_sample = XrayDataset('data/LowQ_digest_test', mode='test', tsfm=tsfm)\n test_data_loader_in_sample = DataLoader(noisy_mnist_test_in_sample, batch_size=1, shuffle=False, num_workers=4) \n test_data_loader_out_sample = DataLoader(noisy_mnist_test_out_sample, batch_size=1, shuffle=False, num_workers=4) \n i, test_batch_in_sample = next(enumerate(test_data_loader_in_sample))\n j, test_batch_out_sample = next(enumerate(test_data_loader_out_sample))\n noisy_in_sample = test_batch_in_sample['image']\n noisy_out_sample = test_batch_out_sample['image']\n if use_gpu:\n noisy_in_sample = noisy_in_sample.cuda()\n noisy_out_sample = noisy_out_sample.cuda()\n \n tic = time.time()\n for epoch_idx in range(num_epoch):\n for i, batch in enumerate(data_loader):\n model.train()\n noisy_images = batch['image'] \n net_input, mask = masker.mask(noisy_images, count_batch)\n if use_gpu:\n noisy_images = noisy_images.cuda()\n net_input = net_input.cuda()\n mask = mask.cuda()\n net_output = model(net_input)\n # only use the masked pixel to calculate loss.\n loss = loss_function(net_output*mask, noisy_images*mask)\n \n optimizer.zero_grad()\n \n loss.backward()\n \n optimizer.step()\n count_batch += 1\n print('number of batch:',count_batch)\n if i % 1 == 0:\n print(\"Loss (\", i, \"): \\t\", round(loss.item(), 8))\n \n\n if i == 100:\n break \n \n if epoch_idx % 5 == 0:\n model.eval()\n # calculate the denoise result on test set.\n simple_output = model(noisy_in_sample)\n \n plot_tensors([noisy_in_sample[0], simple_output[0]],[\"Noisy Image\", \"Single Pass Inference\"], plot=False,\\\n save=True, img_dir='babyUnet_denoise_in_sample/', img_name='Epoch_'+str(epoch_idx)) \n simple_output = model(noisy_out_sample)\n plot_tensors([noisy_out_sample[0], simple_output[0]],[\"Noisy Image\", \"Single Pass Inference\"], plot=False,\\\n save=True, img_dir='babyUnet_denoise_out_sample/', img_name='Epoch_'+str(epoch_idx)) \n \n \n # save the model\n torch.save(model.state_dict(), 'BabyUnet_denoise.pth') \n torch.cuda.empty_cache() \n toc = time.time()\n print('Run Time:{}s'.format(toc-tic)) \n #%% test the model on test image. \n #noisy_mnist_test = XrayDataset('data/LowQ_digest_test', mode='test', tsfm=tsfm) \n# test_data_loader = DataLoader(noisy_mnist_test, batch_size=32, shuffle=False, num_workers=4) \n# i, test_batch = next(enumerate(test_data_loader))\n# noisy = test_batch['image']\n# model.eval()\n# # calculate the denoise result on test set.\n# simple_output = model(noisy)\n# #model.eval()\n# #invariant_output = masker.infer_full_image(noisy, model)\n# idx = 3\n# plot_tensors([noisy[idx], simple_output[idx]],\\\n# [\"Noisy Image\", \"Single Pass Inference\"])\n \n \n \n \n \n ",
"<docstring token>\nfrom __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom util import show, plot_images, plot_tensors\nfrom mask import Masker\nfrom models.babyunet import BabyUnet\nfrom torch.nn import MSELoss\nfrom torch.optim import Adam\nfrom PIL import Image, ImageOps\nfrom util_data import tensor_to_image\nimport time\n\n\nclass XrayDataset(Dataset):\n\n def __init__(self, data_dir, mode='train', tsfm=None):\n self.data_dir = data_dir\n self.img_list = os.listdir(data_dir)\n self.mode = mode\n self.transform = tsfm\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.data_dir, self.img_list[idx])\n image = Image.open(img_name)\n if self.mode == 'train':\n image = pad_to_target(image, 256, 256, label=0)\n else:\n image = pad_to_target(image, 1024, 1024, label=0)\n if self.transform:\n image = self.transform(image)\n sample = {'image': image}\n return sample\n\n\ndef pad_to_target(img, target_height, target_width, label=0):\n w, h = img.size\n left = top = right = bottom = 0\n doit = False\n if target_width > w:\n delta = target_width - w\n left = delta // 2\n right = delta - left\n doit = True\n if target_height > h:\n delta = target_height - h\n top = delta // 2\n bottom = delta - top\n doit = True\n if doit:\n img = ImageOps.expand(img, border=(left, top, right, bottom), fill=\n label)\n assert img.size[0] >= target_width\n assert img.size[1] >= target_height\n return img\n\n\nif __name__ == '__main__':\n use_gpu = True\n tsfm = transforms.Compose([transforms.RandomCrop(size=256,\n pad_if_needed=True), transforms.ToTensor()])\n noisy_mnist_train = XrayDataset('data/LowQ_digest_train', mode='train',\n tsfm=tsfm)\n masker = Masker(width=4, mode='interpolate')\n model = BabyUnet()\n if use_gpu:\n model.cuda()\n loss_function = MSELoss()\n optimizer = Adam(model.parameters(), lr=0.001)\n data_loader = DataLoader(noisy_mnist_train, batch_size=32, shuffle=\n False, num_workers=4)\n count_batch = 0\n num_epoch = 51\n tsfm = transforms.ToTensor()\n noisy_mnist_test_in_sample = XrayDataset('data/LowQ_digest_train', mode\n ='test', tsfm=tsfm)\n noisy_mnist_test_out_sample = XrayDataset('data/LowQ_digest_test', mode\n ='test', tsfm=tsfm)\n test_data_loader_in_sample = DataLoader(noisy_mnist_test_in_sample,\n batch_size=1, shuffle=False, num_workers=4)\n test_data_loader_out_sample = DataLoader(noisy_mnist_test_out_sample,\n batch_size=1, shuffle=False, num_workers=4)\n i, test_batch_in_sample = next(enumerate(test_data_loader_in_sample))\n j, test_batch_out_sample = next(enumerate(test_data_loader_out_sample))\n noisy_in_sample = test_batch_in_sample['image']\n noisy_out_sample = test_batch_out_sample['image']\n if use_gpu:\n noisy_in_sample = noisy_in_sample.cuda()\n noisy_out_sample = noisy_out_sample.cuda()\n tic = time.time()\n for epoch_idx in range(num_epoch):\n for i, batch in enumerate(data_loader):\n model.train()\n noisy_images = batch['image']\n net_input, mask = masker.mask(noisy_images, count_batch)\n if use_gpu:\n noisy_images = noisy_images.cuda()\n net_input = net_input.cuda()\n mask = mask.cuda()\n net_output = model(net_input)\n loss = loss_function(net_output * mask, noisy_images * mask)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n count_batch += 1\n print('number of batch:', count_batch)\n if i % 1 == 0:\n print('Loss (', i, '): \\t', round(loss.item(), 8))\n if i == 100:\n break\n if epoch_idx % 5 == 0:\n model.eval()\n simple_output = model(noisy_in_sample)\n plot_tensors([noisy_in_sample[0], simple_output[0]], [\n 'Noisy Image', 'Single Pass Inference'], plot=False, save=\n True, img_dir='babyUnet_denoise_in_sample/', img_name=\n 'Epoch_' + str(epoch_idx))\n simple_output = model(noisy_out_sample)\n plot_tensors([noisy_out_sample[0], simple_output[0]], [\n 'Noisy Image', 'Single Pass Inference'], plot=False, save=\n True, img_dir='babyUnet_denoise_out_sample/', img_name=\n 'Epoch_' + str(epoch_idx))\n torch.save(model.state_dict(), 'BabyUnet_denoise.pth')\n torch.cuda.empty_cache()\n toc = time.time()\n print('Run Time:{}s'.format(toc - tic))\n",
"<docstring token>\n<import token>\n\n\nclass XrayDataset(Dataset):\n\n def __init__(self, data_dir, mode='train', tsfm=None):\n self.data_dir = data_dir\n self.img_list = os.listdir(data_dir)\n self.mode = mode\n self.transform = tsfm\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.data_dir, self.img_list[idx])\n image = Image.open(img_name)\n if self.mode == 'train':\n image = pad_to_target(image, 256, 256, label=0)\n else:\n image = pad_to_target(image, 1024, 1024, label=0)\n if self.transform:\n image = self.transform(image)\n sample = {'image': image}\n return sample\n\n\ndef pad_to_target(img, target_height, target_width, label=0):\n w, h = img.size\n left = top = right = bottom = 0\n doit = False\n if target_width > w:\n delta = target_width - w\n left = delta // 2\n right = delta - left\n doit = True\n if target_height > h:\n delta = target_height - h\n top = delta // 2\n bottom = delta - top\n doit = True\n if doit:\n img = ImageOps.expand(img, border=(left, top, right, bottom), fill=\n label)\n assert img.size[0] >= target_width\n assert img.size[1] >= target_height\n return img\n\n\nif __name__ == '__main__':\n use_gpu = True\n tsfm = transforms.Compose([transforms.RandomCrop(size=256,\n pad_if_needed=True), transforms.ToTensor()])\n noisy_mnist_train = XrayDataset('data/LowQ_digest_train', mode='train',\n tsfm=tsfm)\n masker = Masker(width=4, mode='interpolate')\n model = BabyUnet()\n if use_gpu:\n model.cuda()\n loss_function = MSELoss()\n optimizer = Adam(model.parameters(), lr=0.001)\n data_loader = DataLoader(noisy_mnist_train, batch_size=32, shuffle=\n False, num_workers=4)\n count_batch = 0\n num_epoch = 51\n tsfm = transforms.ToTensor()\n noisy_mnist_test_in_sample = XrayDataset('data/LowQ_digest_train', mode\n ='test', tsfm=tsfm)\n noisy_mnist_test_out_sample = XrayDataset('data/LowQ_digest_test', mode\n ='test', tsfm=tsfm)\n test_data_loader_in_sample = DataLoader(noisy_mnist_test_in_sample,\n batch_size=1, shuffle=False, num_workers=4)\n test_data_loader_out_sample = DataLoader(noisy_mnist_test_out_sample,\n batch_size=1, shuffle=False, num_workers=4)\n i, test_batch_in_sample = next(enumerate(test_data_loader_in_sample))\n j, test_batch_out_sample = next(enumerate(test_data_loader_out_sample))\n noisy_in_sample = test_batch_in_sample['image']\n noisy_out_sample = test_batch_out_sample['image']\n if use_gpu:\n noisy_in_sample = noisy_in_sample.cuda()\n noisy_out_sample = noisy_out_sample.cuda()\n tic = time.time()\n for epoch_idx in range(num_epoch):\n for i, batch in enumerate(data_loader):\n model.train()\n noisy_images = batch['image']\n net_input, mask = masker.mask(noisy_images, count_batch)\n if use_gpu:\n noisy_images = noisy_images.cuda()\n net_input = net_input.cuda()\n mask = mask.cuda()\n net_output = model(net_input)\n loss = loss_function(net_output * mask, noisy_images * mask)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n count_batch += 1\n print('number of batch:', count_batch)\n if i % 1 == 0:\n print('Loss (', i, '): \\t', round(loss.item(), 8))\n if i == 100:\n break\n if epoch_idx % 5 == 0:\n model.eval()\n simple_output = model(noisy_in_sample)\n plot_tensors([noisy_in_sample[0], simple_output[0]], [\n 'Noisy Image', 'Single Pass Inference'], plot=False, save=\n True, img_dir='babyUnet_denoise_in_sample/', img_name=\n 'Epoch_' + str(epoch_idx))\n simple_output = model(noisy_out_sample)\n plot_tensors([noisy_out_sample[0], simple_output[0]], [\n 'Noisy Image', 'Single Pass Inference'], plot=False, save=\n True, img_dir='babyUnet_denoise_out_sample/', img_name=\n 'Epoch_' + str(epoch_idx))\n torch.save(model.state_dict(), 'BabyUnet_denoise.pth')\n torch.cuda.empty_cache()\n toc = time.time()\n print('Run Time:{}s'.format(toc - tic))\n",
"<docstring token>\n<import token>\n\n\nclass XrayDataset(Dataset):\n\n def __init__(self, data_dir, mode='train', tsfm=None):\n self.data_dir = data_dir\n self.img_list = os.listdir(data_dir)\n self.mode = mode\n self.transform = tsfm\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.data_dir, self.img_list[idx])\n image = Image.open(img_name)\n if self.mode == 'train':\n image = pad_to_target(image, 256, 256, label=0)\n else:\n image = pad_to_target(image, 1024, 1024, label=0)\n if self.transform:\n image = self.transform(image)\n sample = {'image': image}\n return sample\n\n\ndef pad_to_target(img, target_height, target_width, label=0):\n w, h = img.size\n left = top = right = bottom = 0\n doit = False\n if target_width > w:\n delta = target_width - w\n left = delta // 2\n right = delta - left\n doit = True\n if target_height > h:\n delta = target_height - h\n top = delta // 2\n bottom = delta - top\n doit = True\n if doit:\n img = ImageOps.expand(img, border=(left, top, right, bottom), fill=\n label)\n assert img.size[0] >= target_width\n assert img.size[1] >= target_height\n return img\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass XrayDataset(Dataset):\n\n def __init__(self, data_dir, mode='train', tsfm=None):\n self.data_dir = data_dir\n self.img_list = os.listdir(data_dir)\n self.mode = mode\n self.transform = tsfm\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.data_dir, self.img_list[idx])\n image = Image.open(img_name)\n if self.mode == 'train':\n image = pad_to_target(image, 256, 256, label=0)\n else:\n image = pad_to_target(image, 1024, 1024, label=0)\n if self.transform:\n image = self.transform(image)\n sample = {'image': image}\n return sample\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass XrayDataset(Dataset):\n <function token>\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.data_dir, self.img_list[idx])\n image = Image.open(img_name)\n if self.mode == 'train':\n image = pad_to_target(image, 256, 256, label=0)\n else:\n image = pad_to_target(image, 1024, 1024, label=0)\n if self.transform:\n image = self.transform(image)\n sample = {'image': image}\n return sample\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass XrayDataset(Dataset):\n <function token>\n <function token>\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.data_dir, self.img_list[idx])\n image = Image.open(img_name)\n if self.mode == 'train':\n image = pad_to_target(image, 256, 256, label=0)\n else:\n image = pad_to_target(image, 1024, 1024, label=0)\n if self.transform:\n image = self.transform(image)\n sample = {'image': image}\n return sample\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass XrayDataset(Dataset):\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,694 |
9a92eeea49790131e0b10edde7ebff5bbf210271
|
# python local variable sent to function, modified, but was not passed back to program
grade_list, grade_count = Gather_Intel(grade_list, grade_count)
|
[
"# python local variable sent to function, modified, but was not passed back to program\ngrade_list, grade_count = Gather_Intel(grade_list, grade_count)\n",
"grade_list, grade_count = Gather_Intel(grade_list, grade_count)\n",
"<assignment token>\n"
] | false |
99,695 |
16bd311ff987e614fb602afabf77b0c125d9307a
|
from magma import *
def test():
And2 = DeclareCircuit("And2", "I0", In(Bit), "I1", In(Bit), "O", Out(Bit))
assert str(And2) == 'And2'
print(repr(And2))
assert repr(And2) == 'And2 = DeclareCircuit("And2", "I0", In(Bit), "I1", In(Bit), "O", Out(Bit))'
and2 = And2()
and2.name = 'and2'
assert str(and2) == 'and2'
print(repr(and2))
assert str(and2.I0) == 'I0'
print(repr(and2.I0))
test()
|
[
"from magma import *\n\ndef test():\n And2 = DeclareCircuit(\"And2\", \"I0\", In(Bit), \"I1\", In(Bit), \"O\", Out(Bit))\n assert str(And2) == 'And2'\n print(repr(And2))\n assert repr(And2) == 'And2 = DeclareCircuit(\"And2\", \"I0\", In(Bit), \"I1\", In(Bit), \"O\", Out(Bit))'\n\n and2 = And2()\n and2.name = 'and2'\n assert str(and2) == 'and2'\n print(repr(and2))\n assert str(and2.I0) == 'I0'\n print(repr(and2.I0))\n\ntest()\n",
"from magma import *\n\n\ndef test():\n And2 = DeclareCircuit('And2', 'I0', In(Bit), 'I1', In(Bit), 'O', Out(Bit))\n assert str(And2) == 'And2'\n print(repr(And2))\n assert repr(And2\n ) == 'And2 = DeclareCircuit(\"And2\", \"I0\", In(Bit), \"I1\", In(Bit), \"O\", Out(Bit))'\n and2 = And2()\n and2.name = 'and2'\n assert str(and2) == 'and2'\n print(repr(and2))\n assert str(and2.I0) == 'I0'\n print(repr(and2.I0))\n\n\ntest()\n",
"<import token>\n\n\ndef test():\n And2 = DeclareCircuit('And2', 'I0', In(Bit), 'I1', In(Bit), 'O', Out(Bit))\n assert str(And2) == 'And2'\n print(repr(And2))\n assert repr(And2\n ) == 'And2 = DeclareCircuit(\"And2\", \"I0\", In(Bit), \"I1\", In(Bit), \"O\", Out(Bit))'\n and2 = And2()\n and2.name = 'and2'\n assert str(and2) == 'and2'\n print(repr(and2))\n assert str(and2.I0) == 'I0'\n print(repr(and2.I0))\n\n\ntest()\n",
"<import token>\n\n\ndef test():\n And2 = DeclareCircuit('And2', 'I0', In(Bit), 'I1', In(Bit), 'O', Out(Bit))\n assert str(And2) == 'And2'\n print(repr(And2))\n assert repr(And2\n ) == 'And2 = DeclareCircuit(\"And2\", \"I0\", In(Bit), \"I1\", In(Bit), \"O\", Out(Bit))'\n and2 = And2()\n and2.name = 'and2'\n assert str(and2) == 'and2'\n print(repr(and2))\n assert str(and2.I0) == 'I0'\n print(repr(and2.I0))\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
99,696 |
59d0a2a8fe2aa7fe6c70f2eabb88ea30ab00793b
|
from charms.reactive import when
from charms.reactive import when_not
from charms.reactive import set_state
from charms.reactive import remove_state
from charms.reactive.helpers import data_changed
from charms.layer.hadoop_base import get_hadoop_base
from jujubigdata.handlers import HDFS
from jujubigdata import utils
from charmhelpers.core import hookenv, unitdata
from charms import leadership
from charms.layer.apache_hadoop_namenode import get_cluster_nodes
from charms.layer.apache_hadoop_namenode import set_cluster_nodes
@when('hadoop.installed')
@when_not('namenode.started')
@when('leadership.is_leader') # don't both starting standalone if not leader
@when('leadership.set.cluster-nodes')
def configure_namenode():
hadoop = get_hadoop_base()
hdfs = HDFS(hadoop)
hdfs.configure_namenode(get_cluster_nodes())
hdfs.format_namenode()
hdfs.start_namenode()
hdfs.create_hdfs_dirs()
hadoop.open_ports('namenode')
utils.initialize_kv_host()
utils.manage_etc_hosts()
set_state('namenode.started')
@when('hadoop.installed', 'leadership.is_leader')
@when_not('leadership.set.ssh-key-pub')
def generate_ssh_key():
utils.generate_ssh_key('hdfs')
leadership.leader_set({
'ssh-key-priv': utils.ssh_priv_key('hdfs').text(),
'ssh-key-pub': utils.ssh_pub_key('hdfs').text(),
})
@when('leadership.changed.ssh-key-pub')
def install_ssh_pub_key():
ssh_dir = utils.ssh_key_dir('hdfs')
ssh_dir.makedirs_p()
authfile = ssh_dir / 'authorized_keys'
authfile.write_lines([leadership.leader_get('ssh-key-pub')], append=True)
@when('leadership.changed.ssh-key-priv')
@when_not('leadership.is_leader')
def install_ssh_priv_key():
ssh_dir = utils.ssh_key_dir('hdfs')
ssh_dir.makedirs_p()
keyfile = ssh_dir / 'id_rsa'
keyfile.write_text(leadership.leader_get('ssh-key-priv'))
@when('datanode.joined')
def manage_datanode_hosts(datanode):
utils.update_kv_hosts(datanode.hosts_map())
utils.manage_etc_hosts()
datanode.send_hosts_map(utils.get_kv_hosts())
@when('datanode.joined', 'leadership.set.ssh-key-pub')
def send_ssh_key(datanode):
datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))
@when('leadership.is_leader')
@when_not('leadership.set.cluster-nodes')
def init_cluster_nodes():
local_hostname = hookenv.local_unit().replace('/', '-')
set_cluster_nodes([local_hostname])
@when('namenode.started', 'datanode.joined')
def send_info(datanode):
hadoop = get_hadoop_base()
hdfs_port = hadoop.dist_config.port('namenode')
webhdfs_port = hadoop.dist_config.port('nn_webapp_http')
datanode.send_spec(hadoop.spec())
datanode.send_clustername(hookenv.service_name())
datanode.send_namenodes(get_cluster_nodes())
datanode.send_ports(hdfs_port, webhdfs_port)
@when('namenode.started', 'datanode.joined')
def update_slaves(datanode):
hadoop = get_hadoop_base()
hdfs = HDFS(hadoop)
slaves = datanode.nodes()
if data_changed('namenode.slaves', slaves):
unitdata.kv().set('namenode.slaves', slaves)
hdfs.register_slaves(slaves)
hdfs.reload_slaves()
set_state('namenode.ready')
@when('namenode.started', 'datanode.joined')
@when('leadership.changed.cluster-nodes')
def update_nodes(datanode):
datanode.send_namenodes(get_cluster_nodes())
@when('namenode.ready')
@when('namenode.clients')
def accept_clients(clients):
hadoop = get_hadoop_base()
hdfs_port = hadoop.dist_config.port('namenode')
webhdfs_port = hadoop.dist_config.port('nn_webapp_http')
clients.send_spec(hadoop.spec())
clients.send_clustername(hookenv.service_name())
clients.send_namenodes(get_cluster_nodes())
clients.send_ports(hdfs_port, webhdfs_port)
clients.send_hosts_map(utils.get_kv_hosts())
clients.send_ready(True)
@when('namenode.ready')
@when('namenode.clients')
@when('leadership.changed.cluster-nodes')
def update_clients(clients):
clients.send_namenodes(get_cluster_nodes())
@when('namenode.clients')
@when_not('namenode.ready')
def reject_clients(clients):
clients.send_ready(False)
@when('namenode.started', 'datanode.departing')
def unregister_datanode(datanode):
hadoop = get_hadoop_base()
hdfs = HDFS(hadoop)
slaves = unitdata.kv().get('namenode.slaves', [])
slaves_leaving = datanode.nodes() # only returns nodes in "leaving" state
hookenv.log('Slaves leaving: {}'.format(slaves_leaving))
slaves_remaining = list(set(slaves) - set(slaves_leaving))
unitdata.kv().set('namenode.slaves', slaves_remaining)
hdfs.register_slaves(slaves_remaining)
hdfs.reload_slaves()
utils.remove_kv_hosts(slaves_leaving)
utils.manage_etc_hosts()
if not slaves_remaining:
remove_state('namenode.ready')
datanode.dismiss()
@when('benchmark.joined')
def register_benchmarks(benchmark):
benchmark.register('nnbench', 'testdfsio')
|
[
"from charms.reactive import when\nfrom charms.reactive import when_not\nfrom charms.reactive import set_state\nfrom charms.reactive import remove_state\nfrom charms.reactive.helpers import data_changed\nfrom charms.layer.hadoop_base import get_hadoop_base\nfrom jujubigdata.handlers import HDFS\nfrom jujubigdata import utils\nfrom charmhelpers.core import hookenv, unitdata\nfrom charms import leadership\nfrom charms.layer.apache_hadoop_namenode import get_cluster_nodes\nfrom charms.layer.apache_hadoop_namenode import set_cluster_nodes\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader') # don't both starting standalone if not leader\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n@when('hadoop.installed', 'leadership.is_leader')\n@when_not('leadership.set.ssh-key-pub')\ndef generate_ssh_key():\n utils.generate_ssh_key('hdfs')\n leadership.leader_set({\n 'ssh-key-priv': utils.ssh_priv_key('hdfs').text(),\n 'ssh-key-pub': utils.ssh_pub_key('hdfs').text(),\n })\n\n\n@when('leadership.changed.ssh-key-pub')\ndef install_ssh_pub_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n authfile = ssh_dir / 'authorized_keys'\n authfile.write_lines([leadership.leader_get('ssh-key-pub')], append=True)\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n@when('datanode.joined')\ndef manage_datanode_hosts(datanode):\n utils.update_kv_hosts(datanode.hosts_map())\n utils.manage_etc_hosts()\n datanode.send_hosts_map(utils.get_kv_hosts())\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n@when('namenode.started', 'datanode.joined')\ndef update_slaves(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = datanode.nodes()\n if data_changed('namenode.slaves', slaves):\n unitdata.kv().set('namenode.slaves', slaves)\n hdfs.register_slaves(slaves)\n hdfs.reload_slaves()\n\n set_state('namenode.ready')\n\n\n@when('namenode.started', 'datanode.joined')\n@when('leadership.changed.cluster-nodes')\ndef update_nodes(datanode):\n datanode.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes() # only returns nodes in \"leaving\" state\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n\n if not slaves_remaining:\n remove_state('namenode.ready')\n\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"from charms.reactive import when\nfrom charms.reactive import when_not\nfrom charms.reactive import set_state\nfrom charms.reactive import remove_state\nfrom charms.reactive.helpers import data_changed\nfrom charms.layer.hadoop_base import get_hadoop_base\nfrom jujubigdata.handlers import HDFS\nfrom jujubigdata import utils\nfrom charmhelpers.core import hookenv, unitdata\nfrom charms import leadership\nfrom charms.layer.apache_hadoop_namenode import get_cluster_nodes\nfrom charms.layer.apache_hadoop_namenode import set_cluster_nodes\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n@when('hadoop.installed', 'leadership.is_leader')\n@when_not('leadership.set.ssh-key-pub')\ndef generate_ssh_key():\n utils.generate_ssh_key('hdfs')\n leadership.leader_set({'ssh-key-priv': utils.ssh_priv_key('hdfs').text(\n ), 'ssh-key-pub': utils.ssh_pub_key('hdfs').text()})\n\n\n@when('leadership.changed.ssh-key-pub')\ndef install_ssh_pub_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n authfile = ssh_dir / 'authorized_keys'\n authfile.write_lines([leadership.leader_get('ssh-key-pub')], append=True)\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n@when('datanode.joined')\ndef manage_datanode_hosts(datanode):\n utils.update_kv_hosts(datanode.hosts_map())\n utils.manage_etc_hosts()\n datanode.send_hosts_map(utils.get_kv_hosts())\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n@when('namenode.started', 'datanode.joined')\ndef update_slaves(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = datanode.nodes()\n if data_changed('namenode.slaves', slaves):\n unitdata.kv().set('namenode.slaves', slaves)\n hdfs.register_slaves(slaves)\n hdfs.reload_slaves()\n set_state('namenode.ready')\n\n\n@when('namenode.started', 'datanode.joined')\n@when('leadership.changed.cluster-nodes')\ndef update_nodes(datanode):\n datanode.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n@when('hadoop.installed', 'leadership.is_leader')\n@when_not('leadership.set.ssh-key-pub')\ndef generate_ssh_key():\n utils.generate_ssh_key('hdfs')\n leadership.leader_set({'ssh-key-priv': utils.ssh_priv_key('hdfs').text(\n ), 'ssh-key-pub': utils.ssh_pub_key('hdfs').text()})\n\n\n@when('leadership.changed.ssh-key-pub')\ndef install_ssh_pub_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n authfile = ssh_dir / 'authorized_keys'\n authfile.write_lines([leadership.leader_get('ssh-key-pub')], append=True)\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n@when('datanode.joined')\ndef manage_datanode_hosts(datanode):\n utils.update_kv_hosts(datanode.hosts_map())\n utils.manage_etc_hosts()\n datanode.send_hosts_map(utils.get_kv_hosts())\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n@when('namenode.started', 'datanode.joined')\ndef update_slaves(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = datanode.nodes()\n if data_changed('namenode.slaves', slaves):\n unitdata.kv().set('namenode.slaves', slaves)\n hdfs.register_slaves(slaves)\n hdfs.reload_slaves()\n set_state('namenode.ready')\n\n\n@when('namenode.started', 'datanode.joined')\n@when('leadership.changed.cluster-nodes')\ndef update_nodes(datanode):\n datanode.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n@when('hadoop.installed', 'leadership.is_leader')\n@when_not('leadership.set.ssh-key-pub')\ndef generate_ssh_key():\n utils.generate_ssh_key('hdfs')\n leadership.leader_set({'ssh-key-priv': utils.ssh_priv_key('hdfs').text(\n ), 'ssh-key-pub': utils.ssh_pub_key('hdfs').text()})\n\n\n<function token>\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n@when('datanode.joined')\ndef manage_datanode_hosts(datanode):\n utils.update_kv_hosts(datanode.hosts_map())\n utils.manage_etc_hosts()\n datanode.send_hosts_map(utils.get_kv_hosts())\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n@when('namenode.started', 'datanode.joined')\ndef update_slaves(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = datanode.nodes()\n if data_changed('namenode.slaves', slaves):\n unitdata.kv().set('namenode.slaves', slaves)\n hdfs.register_slaves(slaves)\n hdfs.reload_slaves()\n set_state('namenode.ready')\n\n\n@when('namenode.started', 'datanode.joined')\n@when('leadership.changed.cluster-nodes')\ndef update_nodes(datanode):\n datanode.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n@when('hadoop.installed', 'leadership.is_leader')\n@when_not('leadership.set.ssh-key-pub')\ndef generate_ssh_key():\n utils.generate_ssh_key('hdfs')\n leadership.leader_set({'ssh-key-priv': utils.ssh_priv_key('hdfs').text(\n ), 'ssh-key-pub': utils.ssh_pub_key('hdfs').text()})\n\n\n<function token>\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n@when('datanode.joined')\ndef manage_datanode_hosts(datanode):\n utils.update_kv_hosts(datanode.hosts_map())\n utils.manage_etc_hosts()\n datanode.send_hosts_map(utils.get_kv_hosts())\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.joined')\n@when('leadership.changed.cluster-nodes')\ndef update_nodes(datanode):\n datanode.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n@when('hadoop.installed', 'leadership.is_leader')\n@when_not('leadership.set.ssh-key-pub')\ndef generate_ssh_key():\n utils.generate_ssh_key('hdfs')\n leadership.leader_set({'ssh-key-priv': utils.ssh_priv_key('hdfs').text(\n ), 'ssh-key-pub': utils.ssh_pub_key('hdfs').text()})\n\n\n<function token>\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n@when('datanode.joined')\ndef manage_datanode_hosts(datanode):\n utils.update_kv_hosts(datanode.hosts_map())\n utils.manage_etc_hosts()\n datanode.send_hosts_map(utils.get_kv_hosts())\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n<function token>\n<function token>\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n@when('datanode.joined')\ndef manage_datanode_hosts(datanode):\n utils.update_kv_hosts(datanode.hosts_map())\n utils.manage_etc_hosts()\n datanode.send_hosts_map(utils.get_kv_hosts())\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n<function token>\n<function token>\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n<function token>\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n@when('namenode.clients')\n@when_not('namenode.ready')\ndef reject_clients(clients):\n clients.send_ready(False)\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n\n\n@when('hadoop.installed')\n@when_not('namenode.started')\n@when('leadership.is_leader')\n@when('leadership.set.cluster-nodes')\ndef configure_namenode():\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n hdfs.configure_namenode(get_cluster_nodes())\n hdfs.format_namenode()\n hdfs.start_namenode()\n hdfs.create_hdfs_dirs()\n hadoop.open_ports('namenode')\n utils.initialize_kv_host()\n utils.manage_etc_hosts()\n set_state('namenode.started')\n\n\n<function token>\n<function token>\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n<function token>\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\n@when('leadership.changed.ssh-key-priv')\n@when_not('leadership.is_leader')\ndef install_ssh_priv_key():\n ssh_dir = utils.ssh_key_dir('hdfs')\n ssh_dir.makedirs_p()\n keyfile = ssh_dir / 'id_rsa'\n keyfile.write_text(leadership.leader_get('ssh-key-priv'))\n\n\n<function token>\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n@when('leadership.is_leader')\n@when_not('leadership.set.cluster-nodes')\ndef init_cluster_nodes():\n local_hostname = hookenv.local_unit().replace('/', '-')\n set_cluster_nodes([local_hostname])\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n@when('benchmark.joined')\ndef register_benchmarks(benchmark):\n benchmark.register('nnbench', 'testdfsio')\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@when('datanode.joined', 'leadership.set.ssh-key-pub')\ndef send_ssh_key(datanode):\n datanode.send_ssh_key(leadership.leader_get('ssh-key-pub'))\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\ndef accept_clients(clients):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n clients.send_spec(hadoop.spec())\n clients.send_clustername(hookenv.service_name())\n clients.send_namenodes(get_cluster_nodes())\n clients.send_ports(hdfs_port, webhdfs_port)\n clients.send_hosts_map(utils.get_kv_hosts())\n clients.send_ready(True)\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@when('namenode.started', 'datanode.joined')\ndef send_info(datanode):\n hadoop = get_hadoop_base()\n hdfs_port = hadoop.dist_config.port('namenode')\n webhdfs_port = hadoop.dist_config.port('nn_webapp_http')\n datanode.send_spec(hadoop.spec())\n datanode.send_clustername(hookenv.service_name())\n datanode.send_namenodes(get_cluster_nodes())\n datanode.send_ports(hdfs_port, webhdfs_port)\n\n\n<function token>\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n\n\n@when('namenode.started', 'datanode.departing')\ndef unregister_datanode(datanode):\n hadoop = get_hadoop_base()\n hdfs = HDFS(hadoop)\n slaves = unitdata.kv().get('namenode.slaves', [])\n slaves_leaving = datanode.nodes()\n hookenv.log('Slaves leaving: {}'.format(slaves_leaving))\n slaves_remaining = list(set(slaves) - set(slaves_leaving))\n unitdata.kv().set('namenode.slaves', slaves_remaining)\n hdfs.register_slaves(slaves_remaining)\n hdfs.reload_slaves()\n utils.remove_kv_hosts(slaves_leaving)\n utils.manage_etc_hosts()\n if not slaves_remaining:\n remove_state('namenode.ready')\n datanode.dismiss()\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@when('namenode.ready')\n@when('namenode.clients')\n@when('leadership.changed.cluster-nodes')\ndef update_clients(clients):\n clients.send_namenodes(get_cluster_nodes())\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,697 |
a990cdfdadb127af6577a6be2973cac67f8556f2
|
"""
Defines arguments manipulation utilities, like checking if an argument is iterable, flattening a nested arguments list, etc.
These utility functions can be used by other util modules and are imported in util's main namespace for use by other pymel modules
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from future import standard_library
import numbers
# 2to3: remove switch when python-3 only
try:
from collections.abc import Mapping, Sequence
except ImportError:
from collections import Mapping, Sequence
standard_library.install_aliases()
from builtins import range
from past.builtins import basestring
from builtins import object
from future.utils import PY2
from collections import deque as _deque
import sys
import operator
import itertools
from .utilitytypes import ProxyUnicode
if False:
from typing import *
T = TypeVar('T')
# some functions used to need to make the difference between strings and non-string iterables when PyNode where unicode derived
# doing a hasattr(obj, '__iter__') test will fail for objects that implement __getitem__, but not __iter__, so try iter(obj)
def isIterable(obj):
# type: (Any) -> bool
"""
Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.
Returns
-------
bool
"""
if isinstance(obj, basestring):
return False
elif isinstance(obj, ProxyUnicode):
return False
try:
iter(obj)
except TypeError:
return False
else:
return True
# consider only ints and floats numeric
def isScalar(obj):
# type: (Any) -> bool
"""
Returns True if an object is a number or complex type, otherwise returns False.
Returns
-------
bool
"""
return isinstance(obj, numbers.Number) and not isinstance(obj, complex)
# TODO : this is unneeded as operator provides it, can call directly to operator methods
def isNumeric(obj):
# type: (Any) -> bool
"""
Returns True if an object is a number type, otherwise returns False.
Returns
-------
bool
"""
return isinstance(obj, numbers.Number)
def isSequence(obj):
# type: (Any) -> bool
"""
same as `operator.isSequenceType`
Returns
-------
bool
"""
return isinstance(obj, Sequence)
def isMapping(obj):
# type: (Any) -> bool
"""
Returns True if an object is a mapping (dictionary) type, otherwise returns False.
same as `operator.isMappingType`
Returns
-------
bool
"""
return isinstance(obj, Mapping)
clsname = lambda x: type(x).__name__
def convertListArgs(args):
if len(args) == 1 and isIterable(args[0]):
return tuple(args[0])
return args
def expandArgs(*args, **kwargs):
"""
'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its
elements that will be inserted at its place in the returned arguments.
By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.
:Keywords:
depth : int
will specify the nested depth limit after which iterables are returned as they are
type
for type='list' will only expand lists, by default type='all' expands any iterable sequence
postorder : bool
will return elements depth first, from leaves to roots
breadth : bool
will return elements breadth first, roots, then first depth level, etc.
For a nested list represent trees::
a____b____c
| |____d
e____f
|____g
preorder(default) :
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )
('a', 'b', ['c', 'd'], 'e', 'f', 'g')
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )
('a', 'b', 'c', 'd', 'e', 'f', 'g')
postorder :
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)
('b', ['c', 'd'], 'a', 'f', 'g', 'e')
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)
('c', 'd', 'b', 'a', 'f', 'g', 'e')
breadth :
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)
('a', 'e', 'b', ['c', 'd'], 'f', 'g')
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)
('a', 'e', 'b', 'f', 'g', 'c', 'd')
Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree
result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] """
tpe = kwargs.get('type', 'all')
limit = kwargs.get('limit', sys.getrecursionlimit())
postorder = kwargs.get('postorder', False)
breadth = kwargs.get('breadth', False)
if tpe == 'list' or tpe == list:
def _expandArgsTest(arg):
return type(arg) == list
elif tpe == 'all':
def _expandArgsTest(arg):
return isIterable(arg)
else:
raise ValueError("unknown expand type=%s" % tpe)
if postorder:
return postorderArgs(limit, _expandArgsTest, *args)
elif breadth:
return breadthArgs(limit, _expandArgsTest, *args)
else:
return preorderArgs(limit, _expandArgsTest, *args)
def preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):
""" returns a list of a preorder expansion of args """
stack = [(x, 0) for x in args]
result = _deque()
while stack:
arg, level = stack.pop()
if testFn(arg) and level < limit:
stack += [(x, level + 1) for x in arg]
else:
result.appendleft(arg)
return tuple(result)
def postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):
""" returns a list of a postorder expansion of args """
if len(args) == 1:
return (args[0],)
else:
deq = _deque((x, 0) for x in args)
stack = []
result = []
while deq:
arg, level = deq.popleft()
if testFn(arg) and level < limit:
deq = _deque([(x, level + 1) for x in arg] + list(deq))
else:
if stack:
while stack and level <= stack[-1][1]:
result.append(stack.pop()[0])
stack.append((arg, level))
else:
stack.append((arg, level))
while stack:
result.append(stack.pop()[0])
return tuple(result)
def breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):
""" returns a list of a breadth first expansion of args """
deq = _deque((x, 0) for x in args)
result = []
while deq:
arg, level = deq.popleft()
if testFn(arg) and level < limit:
for a in arg:
deq.append((a, level + 1))
else:
result.append(arg)
return tuple(result)
# Same behavior as expandListArg but implemented as an Python iterator, the recursieve approach
# will be more memory efficient, but slower
def iterateArgs(*args, **kwargs):
""" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its
elements that will be inserted at its place in the returned arguments.
By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.
:Keywords:
depth : int
will specify the nested depth limit after which iterables are returned as they are
type
for type='list' will only expand lists, by default type='all' expands any iterable sequence
postorder : bool
will return elements depth first, from leaves to roots
breadth : bool
will return elements breadth first, roots, then first depth level, etc.
For a nested list represent trees::
a____b____c
| |____d
e____f
|____g
preorder(default) :
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))
('a', 'b', ['c', 'd'], 'e', 'f', 'g')
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))
('a', 'b', 'c', 'd', 'e', 'f', 'g')
postorder :
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))
('b', ['c', 'd'], 'a', 'f', 'g', 'e')
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))
('c', 'd', 'b', 'a', 'f', 'g', 'e')
breadth :
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))
('a', 'e', 'b', ['c', 'd'], 'f', 'g')
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))
('a', 'e', 'b', 'f', 'g', 'c', 'd')
Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree
result will be the equivalent of using a preorder iterator : iter(theTree) """
tpe = kwargs.get('type', 'all')
limit = kwargs.get('limit', sys.getrecursionlimit())
postorder = kwargs.get('postorder', False)
breadth = kwargs.get('breadth', False)
if tpe == 'list' or tpe == list:
def _iterateArgsTest(arg):
return type(arg) == list
elif tpe == 'all':
def _iterateArgsTest(arg):
return isIterable(arg)
else:
raise ValueError("unknown expand type=%s" % tpe)
if postorder:
for arg in postorderIterArgs(limit, _iterateArgsTest, *args):
yield arg
elif breadth:
for arg in breadthIterArgs(limit, _iterateArgsTest, *args):
yield arg
else:
for arg in preorderIterArgs(limit, _iterateArgsTest, *args):
yield arg
def preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):
""" iterator doing a preorder expansion of args """
if limit:
for arg in args:
if testFn(arg):
for a in preorderIterArgs(limit - 1, testFn, *arg):
yield a
else:
yield arg
else:
for arg in args:
yield arg
def postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):
""" iterator doing a postorder expansion of args """
if limit:
last = None
for arg in args:
if testFn(arg):
for a in postorderIterArgs(limit - 1, testFn, *arg):
yield a
else:
if last:
yield last
last = arg
if last:
yield last
else:
for arg in args:
yield arg
def breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):
""" iterator doing a breadth first expansion of args """
deq = _deque((x, 0) for x in args)
while deq:
arg, level = deq.popleft()
if testFn(arg) and level < limit:
for a in arg:
deq.append((a, level + 1))
else:
yield arg
def preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):
""" iterator doing a preorder expansion of args """
if limit:
for arg in iterable:
if testFn(arg):
for a in preorderIterArgs(limit - 1, testFn, *arg):
yield a
else:
yield arg
else:
for arg in iterable:
yield arg
def postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):
""" iterator doing a postorder expansion of args """
if limit:
last = None
for arg in iterable:
if testFn(arg):
for a in postorderIterArgs(limit - 1, testFn, *arg):
yield a
else:
if last:
yield last
last = arg
if last:
yield last
else:
for arg in iterable:
yield arg
def breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):
""" iterator doing a breadth first expansion of args """
deq = _deque((x, 0) for x in iterable)
while deq:
arg, level = deq.popleft()
if testFn(arg) and level < limit:
for a in arg:
deq.append((a, level + 1))
else:
yield arg
def listForNone(res):
# type: (Optional[List[T]]) -> List[T]
"returns an empty list when the result is None"
if res is None:
return []
return res
# for discussion of implementation,
# see http://mail.python.org/pipermail/python-list/2008-January/474369.html for discussion...
def pairIter(sequence):
# type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]]
'''
Returns an iterator over every 2 items of sequence.
ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]
If sequence has an odd number of items, the last item will not be returned
in a pair.
'''
theIter = iter(sequence)
return zip(theIter, theIter)
def reorder(x, indexList=[], indexDict={}):
"""
Reorder a list based upon a list of positional indices and/or a dictionary
of fromIndex:toIndex.
>>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']
>>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4
['one', 'four', 'zero', 'two', 'three', 'five', 'six']
>>> reorder( l, [1, None, 4] ) # None can be used as a place-holder
['one', 'zero', 'four', 'two', 'three', 'five', 'six']
>>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6
['one', 'four', 'zero', 'two', 'three', 'six', 'five']
"""
x = list(x)
num = len(x)
popCount = 0
indexValDict = {}
for i, index in enumerate(indexList):
if index is not None:
val = x.pop(index - popCount)
assert index not in indexDict, indexDict
indexValDict[i] = val
popCount += 1
for k, v in list(indexDict.items()):
indexValDict[v] = x.pop(k - popCount)
popCount += 1
newlist = []
for i in range(num):
try:
val = indexValDict[i]
except KeyError:
val = x.pop(0)
newlist.append(val)
return newlist
class RemovedKey(object):
def __init__(self, oldVal):
self.oldVal = oldVal
def __eq__(self, other):
return self.oldVal == other.oldVal
def __ne__(self, other):
return not self.oldVal == other.oldVal
def __hash__(self):
return hash(self.oldVal)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.oldVal)
class AddedKey(object):
def __init__(self, newVal):
self.newVal = newVal
def __eq__(self, other):
return self.newVal == other.newVal
def __ne__(self, other):
return not self.newVal == other.newVal
def __hash__(self):
return hash(self.newVal)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.newVal)
class ChangedKey(object):
def __init__(self, oldVal, newVal):
self.oldVal = oldVal
self.newVal = newVal
def __eq__(self, other):
return self.newVal == other.newVal and self.oldVal == other.oldVal
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.oldVal, self.newVal))
def __repr__(self):
return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)
# 2to3: when we transition to 3-only, get rid of encoding kwarg
def compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,
useChangedKeys=False):
# type: (Union[dict, list, tuple], Union[dict, list, tuple], Union[str, bool, None], bool, bool) -> Tuple[set, set, set, dict]
'''compares two cascading dicts
Parameters
----------
dict1 : Union[dict, list, tuple]
the first object to compare
dict2 : Union[dict, list, tuple]
the second object to compare
encoding : Union[str, bool, None]
controls how comparisons are made when one value is a str, and one is a
unicode; if None, then comparisons are simply made with == (so ascii
characters will compare equally); if the value False, then unicode and
str are ALWAYS considered different - ie, u'foo' and 'foo' would not be
considered equal; otherwise, it should be the name of a unicode
encoding, which will be applied to the unicode string before comparing
useAddedKeys : bool
if True, then similarly to how 'RemovedKey' objects are used in the
returned diferences object (see the Returns section), 'AddedKey' objects
are used for keys which exist in dict2 but not in dict1; this allows
a user to distinguish, purely by inspecting the differences dict, which
keys are brand new, versus merely changed; mergeCascadingDicts will
treat AddedKey objects exactly the same as though they were their
contents - ie, useAddedKeys should make no difference to the behavior
of mergeCascadingDicts
useChangedKeys : bool
if True, then similarly to how 'RemovedKey' objects are used in the
returned diferences object (see the Returns section), 'ChangedKey'
objects are used for keys which exist in both dict1 and dict2, but with
different values
Returns
-------
both : `set`
keys that were present in both (non-recursively)
(both, only1, and only2 should be discrete partitions of all the keys
present in both dict1 and dict2)
only1 : `set`
keys that were present in only1 (non-recursively)
only2 : `set`
keys that were present in only2 (non-recursively)
differences : `dict`
recursive sparse dict containing information that was 'different' in
dict2 - either not present in dict1, or having a different value in
dict2, or removed in dict2 (in which case an instance of 'RemovedKey'
will be set as the value in differences)
Values that are different, and both dictionaries, will themselves have
sparse entries, showing only what is different
The return value should be such that if you do if you merge the
differences with d1, you will get d2.
'''
areSets = False
if isinstance(dict1, set) and isinstance(dict2, set):
areSets = True
v1 = dict1
v2 = dict2
else:
if isinstance(dict1, (list, tuple)):
dict1 = dict(enumerate(dict1))
if isinstance(dict2, (list, tuple)):
dict2 = dict(enumerate(dict2))
v1 = set(dict1)
v2 = set(dict2)
both = v1 & v2
only1 = v1 - both
only2 = v2 - both
if areSets:
if useAddedKeys:
differences = set(AddedKey(key) for key in only2)
else:
differences = set(only2)
differences.update(RemovedKey(key) for key in only1)
else:
recurseTypes = (dict, list, tuple, set)
if PY2:
strUnicode = set([str, unicode])
if useAddedKeys:
differences = dict((key, AddedKey(dict2[key])) for key in only2)
else:
differences = dict((key, dict2[key]) for key in only2)
differences.update((key, RemovedKey(dict1[key])) for key in only1)
for key in both:
val1 = dict1[key]
val2 = dict2[key]
areRecurseTypes = (isinstance(val1, recurseTypes)
and isinstance(val2, recurseTypes))
if areRecurseTypes:
# we have a type that we need to recurse into, and either they
# compare not equal, or encoding is False (in which case they
# may compare python-equal, but could have some str-unicode
# equalities, so we need to verify for ourselves):
if encoding is False or val1 != val2:
subDiffs = compareCascadingDicts(val1, val2,
encoding=encoding,
useAddedKeys=useAddedKeys,
useChangedKeys=useChangedKeys)[-1]
if subDiffs:
differences[key] = subDiffs
else:
# ok, we're not doing a recursive comparison...
if PY2 and set([type(val1), type(val2)]) == strUnicode:
# we have a string and a unicode - decide what to do based on
# encoding setting
if encoding is False:
equal = False
elif encoding is None:
equal = (val1 == val2)
else:
if type(val1) == str:
strVal = val2
unicodeVal = val1
else:
strVal = val1
unicodeVal = val2
try:
encoded = unicodeVal.encode(encoding)
except UnicodeEncodeError:
# if there's an encoding error, consider them
# different
equal = False
else:
equal = (encoded == strVal)
else:
equal = (val1 == val2)
if not equal:
if useChangedKeys:
differences[key] = ChangedKey(val1, val2)
else:
differences[key] = val2
return both, only1, only2, differences
def mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,
allowNewListMembers=False):
"""
recursively update to_dict with values from from_dict.
if any entries in 'from_dict' are instances of the class RemovedKey,
then the key containing that value will be removed from to_dict
if allowDictToListMerging is True, then if to_dict contains a list,
from_dict can contain a dictionary with int keys which can be used to
sparsely update the list.
if allowNewListMembers is True, and allowDictToListMerging is also True,
then if merging an index into a list that currently isn't long enough to
contain that index, then the list will be extended to be long enough (with
None inserted in any intermediate indices)
Note: if using RemovedKey objects and allowDictToList merging, then only
indices greater than all of any indices updated / added should be removed,
because the order in which items are updated / removed is indeterminate.
"""
listMerge = allowDictToListMerging and isinstance(to_dict, list)
if listMerge:
contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)
else:
contains = lambda key: key in to_dict
for key, from_val in from_dict.items():
# print key, from_val
if contains(key):
if isinstance(from_val, RemovedKey):
del to_dict[key]
continue
elif isinstance(from_val, (AddedKey, ChangedKey)):
from_val = from_val.newVal
to_val = to_dict[key]
# if isMapping(from_val) and ( isMapping(to_val) or (allowDictToListMerging and isinstance(to_val, list )) ):
if hasattr(from_val, 'items') and (hasattr(to_val, 'items')
or (allowDictToListMerging and isinstance(to_val, list))):
mergeCascadingDicts(from_val, to_val, allowDictToListMerging)
else:
to_dict[key] = from_val
else:
if isinstance(from_val, RemovedKey):
continue
if listMerge and allowNewListMembers and key >= len(to_dict):
to_dict.extend((None,) * (key + 1 - len(to_dict)))
to_dict[key] = from_val
def setCascadingDictItem(dict, keys, value):
currentDict = dict
for key in keys[:-1]:
if key not in currentDict:
currentDict[key] = {}
currentDict = currentDict[key]
currentDict[keys[-1]] = value
def getCascadingDictItem(dict, keys, default={}):
currentDict = dict
for key in keys[:-1]:
if isMapping(currentDict) and key not in currentDict:
currentDict[key] = {}
currentDict = currentDict[key]
try:
return currentDict[keys[-1]]
except KeyError:
return default
def deepPatch(input, predicate, changer):
'''Recursively traverses the items stored in input (for basic data types:
lists, tuples, sets, and dicts), calling changer on all items for which
predicate returns true, and then replacing the original item with the
changed item.
Changes will be made in place when possible. The patched input (which may
be a new object, or the original object, if altered in place) is returned.
'''
return deepPatchAltered(input, predicate, changer)[0]
def deepPatchAltered(input, predicate, changer):
'''Like deepPatch, but returns a pair, (alteredInput, wasAltered)'''
# first, recurse
anyAltered = False
if isinstance(input, dict):
alteredKeys = {}
for key, val in list(input.items()):
newVal, altered = deepPatchAltered(val, predicate, changer)
if altered:
anyAltered = True
input[key] = newVal
# we need to delay altering the keys, so we don't change size
# of the dict while we're in the middle of traversing; also, if
# changer makes it into a key that already exists - but that already
# existing key is altered too - this is handled correctly. ie, if
# changer adds 2, and we have {2: 'foo', 4: 'bar'}, we need to make
# sure we end up with {4: 'foo', 6: 'bar'}, and not just {4: 'foo'}
newKey, altered = deepPatchAltered(key, predicate, changer)
if altered:
anyAltered = True
alteredKeys[newKey] = input.pop(key)
# ok, now go back and change the keys
input.update(alteredKeys)
elif isinstance(input, list):
for i, item in enumerate(input):
newItem, altered = deepPatchAltered(item, predicate, changer)
if altered:
anyAltered = True
input[i] = newItem
elif isinstance(input, tuple):
asList = list(input)
newList, altered = deepPatchAltered(asList, predicate, changer)
if altered:
anyAltered = True
input = tuple(newList)
elif isinstance(input, set):
toRemove = set()
toAdd = set()
for item in input:
newItem, altered = deepPatchAltered(item, predicate, changer)
if altered:
anyAltered = True
toRemove.add(item)
toAdd.add(newItem)
input.difference_update(toRemove)
input.update(toAdd)
# now check if predicate applies to entire input
if predicate(input):
anyAltered = True
input = changer(input)
return input, anyAltered
def sequenceToSlices(intList, sort=True):
"""convert a sequence of integers into a tuple of slice objects"""
slices = []
if intList:
if sort:
intList = sorted(intList)
start = intList[0]
stop = None
step = None
lastStep = None
lastVal = start
for curr in intList[1:]:
curr = int(curr)
thisStep = curr - lastVal
#assert thisStep > 0, "cannot have duplicate values. pass a set to be safe"
# print
# print "%s -> %s" % (lastVal, curr)
# print "thisStep", thisStep
# print "lastStep", lastStep
# print "step", step
# print "lastVal", lastVal
# print (start, stop, step)
# print slices
if lastStep is None:
# we're here bc the last iteration was the beginning of a new slice
pass
elif thisStep > 0 and thisStep == lastStep:
# we found 2 in a row, they are the beginning of a new slice
# setting step indicates we've found a pattern
# print "found a pattern on", thisStep
step = thisStep
else:
if step is not None:
# since step is set we know a pattern has been found (at least two in a row with same step)
# we also know that the current value is not part of this pattern, so end the old slice at the last value
if step == 1:
newslice = slice(start, lastVal + 1, None)
else:
newslice = slice(start, lastVal + 1, step)
thisStep = None
start = curr
else:
if lastStep == 1:
newslice = slice(start, lastVal + 1, lastStep)
thisStep = None
start = curr
else:
newslice = slice(start, stop + 1)
start = lastVal
# print "adding", newslice
slices.append(newslice)
# start the new
stop = None
step = None
lastStep = thisStep
stop = lastVal
lastVal = curr
if step is not None:
# end the old slice
if step == 1:
newslice = slice(start, lastVal + 1, None)
else:
newslice = slice(start, lastVal + 1, step)
# print "adding", newslice
slices.append(newslice)
else:
if lastStep == 1:
slices.append(slice(start, lastVal + 1, lastStep))
else:
slices.append(slice(start, start + 1))
if lastStep is not None:
slices.append(slice(lastVal, lastVal + 1))
return slices
def izip_longest(*args, **kwds):
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = itertools.repeat(fillvalue)
iters = [itertools.chain(it, sentinel(), fillers) for it in args]
try:
for tup in zip(*iters):
yield tup
except IndexError:
pass
def getImportableObject(importableName):
import importlib
if '.' in importableName:
modulename, objName = importableName.rsplit('.', 1)
else:
# if no module, it's in builtins
modulename = 'builtins'
if PY2:
modulename = '__builtin__'
objName = importableName
moduleobj = importlib.import_module(modulename)
return getattr(moduleobj, objName)
def getImportableName(obj):
import inspect
module = inspect.getmodule(obj)
import builtins
if PY2:
import __builtin__ as builtins
if module == builtins:
return obj.__name__
return '{}.{}'.format(module.__name__, obj.__name__)
|
[
"\"\"\"\nDefines arguments manipulation utilities, like checking if an argument is iterable, flattening a nested arguments list, etc.\nThese utility functions can be used by other util modules and are imported in util's main namespace for use by other pymel modules\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom future import standard_library\nimport numbers\n\n# 2to3: remove switch when python-3 only\ntry:\n from collections.abc import Mapping, Sequence\nexcept ImportError:\n from collections import Mapping, Sequence\nstandard_library.install_aliases()\nfrom builtins import range\nfrom past.builtins import basestring\nfrom builtins import object\nfrom future.utils import PY2\nfrom collections import deque as _deque\nimport sys\nimport operator\nimport itertools\n\nfrom .utilitytypes import ProxyUnicode\n\nif False:\n from typing import *\n T = TypeVar('T')\n\n# some functions used to need to make the difference between strings and non-string iterables when PyNode where unicode derived\n# doing a hasattr(obj, '__iter__') test will fail for objects that implement __getitem__, but not __iter__, so try iter(obj)\n\n\ndef isIterable(obj):\n # type: (Any) -> bool\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n# consider only ints and floats numeric\n\n\ndef isScalar(obj):\n # type: (Any) -> bool\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n# TODO : this is unneeded as operator provides it, can call directly to operator methods\n\n\ndef isNumeric(obj):\n # type: (Any) -> bool\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n # type: (Any) -> bool\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n # type: (Any) -> bool\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\nclsname = lambda x: type(x).__name__\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError(\"unknown expand type=%s\" % tpe)\n\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return (args[0],)\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n else:\n if stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n\n return tuple(result)\n\n# Same behavior as expandListArg but implemented as an Python iterator, the recursieve approach\n# will be more memory efficient, but slower\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError(\"unknown expand type=%s\" % tpe)\n\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\ndef preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg\n\n\ndef postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\ndef breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef listForNone(res):\n # type: (Optional[List[T]]) -> List[T]\n \"returns an empty list when the result is None\"\n if res is None:\n return []\n return res\n\n# for discussion of implementation,\n# see http://mail.python.org/pipermail/python-list/2008-January/474369.html for discussion...\n\n\ndef pairIter(sequence):\n # type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]]\n '''\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n '''\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n# 2to3: when we transition to 3-only, get rid of encoding kwarg\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n # type: (Union[dict, list, tuple], Union[dict, list, tuple], Union[str, bool, None], bool, bool) -> Tuple[set, set, set, dict]\n '''compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n '''\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = (dict, list, tuple, set)\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n\n areRecurseTypes = (isinstance(val1, recurseTypes)\n and isinstance(val2, recurseTypes))\n if areRecurseTypes:\n # we have a type that we need to recurse into, and either they\n # compare not equal, or encoding is False (in which case they\n # may compare python-equal, but could have some str-unicode\n # equalities, so we need to verify for ourselves):\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2,\n encoding=encoding,\n useAddedKeys=useAddedKeys,\n useChangedKeys=useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n # ok, we're not doing a recursive comparison...\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n # we have a string and a unicode - decide what to do based on\n # encoding setting\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = (val1 == val2)\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n # if there's an encoding error, consider them\n # different\n equal = False\n else:\n equal = (encoded == strVal)\n else:\n equal = (val1 == val2)\n\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n\n for key, from_val in from_dict.items():\n # print key, from_val\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n # if isMapping(from_val) and ( isMapping(to_val) or (allowDictToListMerging and isinstance(to_val, list )) ):\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items')\n or (allowDictToListMerging and isinstance(to_val, list))):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n '''Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n '''\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n '''Like deepPatch, but returns a pair, (alteredInput, wasAltered)'''\n\n # first, recurse\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n # we need to delay altering the keys, so we don't change size\n # of the dict while we're in the middle of traversing; also, if\n # changer makes it into a key that already exists - but that already\n # existing key is altered too - this is handled correctly. ie, if\n # changer adds 2, and we have {2: 'foo', 4: 'bar'}, we need to make\n # sure we end up with {4: 'foo', 6: 'bar'}, and not just {4: 'foo'}\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n # ok, now go back and change the keys\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n\n # now check if predicate applies to entire input\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n #assert thisStep > 0, \"cannot have duplicate values. pass a set to be safe\"\n\n# print\n# print \"%s -> %s\" % (lastVal, curr)\n# print \"thisStep\", thisStep\n# print \"lastStep\", lastStep\n# print \"step\", step\n# print \"lastVal\", lastVal\n# print (start, stop, step)\n# print slices\n\n if lastStep is None:\n # we're here bc the last iteration was the beginning of a new slice\n pass\n elif thisStep > 0 and thisStep == lastStep:\n # we found 2 in a row, they are the beginning of a new slice\n # setting step indicates we've found a pattern\n # print \"found a pattern on\", thisStep\n step = thisStep\n else:\n if step is not None:\n # since step is set we know a pattern has been found (at least two in a row with same step)\n # we also know that the current value is not part of this pattern, so end the old slice at the last value\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n else:\n if lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n\n# print \"adding\", newslice\n slices.append(newslice)\n # start the new\n\n stop = None\n step = None\n\n lastStep = thisStep\n\n stop = lastVal\n lastVal = curr\n\n if step is not None:\n # end the old slice\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n\n # print \"adding\", newslice\n slices.append(newslice)\n else:\n\n if lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n\n return slices\n\n\ndef izip_longest(*args, **kwds):\n # izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter() # yields the fillvalue, or raises IndexError\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n # if no module, it's in builtins\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom future import standard_library\nimport numbers\ntry:\n from collections.abc import Mapping, Sequence\nexcept ImportError:\n from collections import Mapping, Sequence\nstandard_library.install_aliases()\nfrom builtins import range\nfrom past.builtins import basestring\nfrom builtins import object\nfrom future.utils import PY2\nfrom collections import deque as _deque\nimport sys\nimport operator\nimport itertools\nfrom .utilitytypes import ProxyUnicode\nif False:\n from typing import *\n T = TypeVar('T')\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\nclsname = lambda x: type(x).__name__\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\ndef preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg\n\n\ndef postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\ndef breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\n<import token>\ntry:\n from collections.abc import Mapping, Sequence\nexcept ImportError:\n from collections import Mapping, Sequence\nstandard_library.install_aliases()\n<import token>\nif False:\n from typing import *\n T = TypeVar('T')\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\nclsname = lambda x: type(x).__name__\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\ndef preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg\n\n\ndef postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\ndef breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\n<import token>\ntry:\n from collections.abc import Mapping, Sequence\nexcept ImportError:\n from collections import Mapping, Sequence\nstandard_library.install_aliases()\n<import token>\nif False:\n from typing import *\n T = TypeVar('T')\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\ndef preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg\n\n\ndef postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\ndef breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\ndef preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg\n\n\ndef postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\ndef breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\ndef preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg\n\n\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\ndef breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\ndef breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\ndef getImportableName(obj):\n import inspect\n module = inspect.getmodule(obj)\n import builtins\n if PY2:\n import __builtin__ as builtins\n if module == builtins:\n return obj.__name__\n return '{}.{}'.format(module.__name__, obj.__name__)\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n\n\ndef listForNone(res):\n \"\"\"returns an empty list when the result is None\"\"\"\n if res is None:\n return []\n return res\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n\n\ndef isIterable(obj):\n \"\"\"\n Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\ndef sequenceToSlices(intList, sort=True):\n \"\"\"convert a sequence of integers into a tuple of slice objects\"\"\"\n slices = []\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n if lastStep is None:\n pass\n elif thisStep > 0 and thisStep == lastStep:\n step = thisStep\n else:\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n elif lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n slices.append(newslice)\n stop = None\n step = None\n lastStep = thisStep\n stop = lastVal\n lastVal = curr\n if step is not None:\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n slices.append(newslice)\n elif lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n return slices\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n\n\ndef isScalar(obj):\n \"\"\"\n Returns True if an object is a number or complex type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\ndef expandArgs(*args, **kwargs):\n \"\"\"\n 'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n\n Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)] \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _expandArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _expandArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n return postorderArgs(limit, _expandArgsTest, *args)\n elif breadth:\n return breadthArgs(limit, _expandArgsTest, *args)\n else:\n return preorderArgs(limit, _expandArgsTest, *args)\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\ndef postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a postorder expansion of args \"\"\"\n if len(args) == 1:\n return args[0],\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n elif stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n return tuple(result)\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\ndef compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False,\n useChangedKeys=False):\n \"\"\"compares two cascading dicts\n\n Parameters\n ----------\n dict1 : Union[dict, list, tuple]\n the first object to compare\n dict2 : Union[dict, list, tuple]\n the second object to compare\n encoding : Union[str, bool, None]\n controls how comparisons are made when one value is a str, and one is a\n unicode; if None, then comparisons are simply made with == (so ascii\n characters will compare equally); if the value False, then unicode and\n str are ALWAYS considered different - ie, u'foo' and 'foo' would not be\n considered equal; otherwise, it should be the name of a unicode\n encoding, which will be applied to the unicode string before comparing\n useAddedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'AddedKey' objects\n are used for keys which exist in dict2 but not in dict1; this allows\n a user to distinguish, purely by inspecting the differences dict, which\n keys are brand new, versus merely changed; mergeCascadingDicts will\n treat AddedKey objects exactly the same as though they were their\n contents - ie, useAddedKeys should make no difference to the behavior\n of mergeCascadingDicts\n useChangedKeys : bool\n if True, then similarly to how 'RemovedKey' objects are used in the\n returned diferences object (see the Returns section), 'ChangedKey'\n objects are used for keys which exist in both dict1 and dict2, but with\n different values\n\n Returns\n -------\n both : `set`\n keys that were present in both (non-recursively)\n (both, only1, and only2 should be discrete partitions of all the keys\n present in both dict1 and dict2)\n only1 : `set`\n keys that were present in only1 (non-recursively)\n only2 : `set`\n keys that were present in only2 (non-recursively)\n differences : `dict`\n recursive sparse dict containing information that was 'different' in\n dict2 - either not present in dict1, or having a different value in\n dict2, or removed in dict2 (in which case an instance of 'RemovedKey'\n will be set as the value in differences)\n Values that are different, and both dictionaries, will themselves have\n sparse entries, showing only what is different\n The return value should be such that if you do if you merge the\n differences with d1, you will get d2.\n \"\"\"\n areSets = False\n if isinstance(dict1, set) and isinstance(dict2, set):\n areSets = True\n v1 = dict1\n v2 = dict2\n else:\n if isinstance(dict1, (list, tuple)):\n dict1 = dict(enumerate(dict1))\n if isinstance(dict2, (list, tuple)):\n dict2 = dict(enumerate(dict2))\n v1 = set(dict1)\n v2 = set(dict2)\n both = v1 & v2\n only1 = v1 - both\n only2 = v2 - both\n if areSets:\n if useAddedKeys:\n differences = set(AddedKey(key) for key in only2)\n else:\n differences = set(only2)\n differences.update(RemovedKey(key) for key in only1)\n else:\n recurseTypes = dict, list, tuple, set\n if PY2:\n strUnicode = set([str, unicode])\n if useAddedKeys:\n differences = dict((key, AddedKey(dict2[key])) for key in only2)\n else:\n differences = dict((key, dict2[key]) for key in only2)\n differences.update((key, RemovedKey(dict1[key])) for key in only1)\n for key in both:\n val1 = dict1[key]\n val2 = dict2[key]\n areRecurseTypes = isinstance(val1, recurseTypes) and isinstance(\n val2, recurseTypes)\n if areRecurseTypes:\n if encoding is False or val1 != val2:\n subDiffs = compareCascadingDicts(val1, val2, encoding=\n encoding, useAddedKeys=useAddedKeys, useChangedKeys\n =useChangedKeys)[-1]\n if subDiffs:\n differences[key] = subDiffs\n else:\n if PY2 and set([type(val1), type(val2)]) == strUnicode:\n if encoding is False:\n equal = False\n elif encoding is None:\n equal = val1 == val2\n else:\n if type(val1) == str:\n strVal = val2\n unicodeVal = val1\n else:\n strVal = val1\n unicodeVal = val2\n try:\n encoded = unicodeVal.encode(encoding)\n except UnicodeEncodeError:\n equal = False\n else:\n equal = encoded == strVal\n else:\n equal = val1 == val2\n if not equal:\n if useChangedKeys:\n differences[key] = ChangedKey(val1, val2)\n else:\n differences[key] = val2\n return both, only1, only2, differences\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\ndef preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a preorder expansion of args \"\"\"\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\ndef reorder(x, indexList=[], indexDict={}):\n \"\"\"\n Reorder a list based upon a list of positional indices and/or a dictionary\n of fromIndex:toIndex.\n\n >>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']\n >>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4\n ['one', 'four', 'zero', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, None, 4] ) # None can be used as a place-holder\n ['one', 'zero', 'four', 'two', 'three', 'five', 'six']\n >>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6\n ['one', 'four', 'zero', 'two', 'three', 'six', 'five']\n \"\"\"\n x = list(x)\n num = len(x)\n popCount = 0\n indexValDict = {}\n for i, index in enumerate(indexList):\n if index is not None:\n val = x.pop(index - popCount)\n assert index not in indexDict, indexDict\n indexValDict[i] = val\n popCount += 1\n for k, v in list(indexDict.items()):\n indexValDict[v] = x.pop(k - popCount)\n popCount += 1\n newlist = []\n for i in range(num):\n try:\n val = indexValDict[i]\n except KeyError:\n val = x.pop(0)\n newlist.append(val)\n return newlist\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\ndef isSequence(obj):\n \"\"\"\n same as `operator.isSequenceType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Sequence)\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n\n\ndef preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a preorder expansion of args \"\"\"\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n return tuple(result)\n\n\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" iterator doing a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg\n\n\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef isNumeric(obj):\n \"\"\"\n Returns True if an object is a number type, otherwise returns False.\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, numbers.Number)\n\n\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\ndef setCascadingDictItem(dict, keys, value):\n currentDict = dict\n for key in keys[:-1]:\n if key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n currentDict[keys[-1]] = value\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n\n\ndef mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False,\n allowNewListMembers=False):\n \"\"\"\n recursively update to_dict with values from from_dict.\n\n if any entries in 'from_dict' are instances of the class RemovedKey,\n then the key containing that value will be removed from to_dict\n\n if allowDictToListMerging is True, then if to_dict contains a list,\n from_dict can contain a dictionary with int keys which can be used to\n sparsely update the list.\n\n if allowNewListMembers is True, and allowDictToListMerging is also True,\n then if merging an index into a list that currently isn't long enough to\n contain that index, then the list will be extended to be long enough (with\n None inserted in any intermediate indices)\n\n Note: if using RemovedKey objects and allowDictToList merging, then only\n indices greater than all of any indices updated / added should be removed,\n because the order in which items are updated / removed is indeterminate.\n \"\"\"\n listMerge = allowDictToListMerging and isinstance(to_dict, list)\n if listMerge:\n contains = lambda key: isinstance(key, int) and 0 <= key < len(to_dict)\n else:\n contains = lambda key: key in to_dict\n for key, from_val in from_dict.items():\n if contains(key):\n if isinstance(from_val, RemovedKey):\n del to_dict[key]\n continue\n elif isinstance(from_val, (AddedKey, ChangedKey)):\n from_val = from_val.newVal\n to_val = to_dict[key]\n if hasattr(from_val, 'items') and (hasattr(to_val, 'items') or \n allowDictToListMerging and isinstance(to_val, list)):\n mergeCascadingDicts(from_val, to_val, allowDictToListMerging)\n else:\n to_dict[key] = from_val\n else:\n if isinstance(from_val, RemovedKey):\n continue\n if listMerge and allowNewListMembers and key >= len(to_dict):\n to_dict.extend((None,) * (key + 1 - len(to_dict)))\n to_dict[key] = from_val\n\n\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\ndef deepPatch(input, predicate, changer):\n \"\"\"Recursively traverses the items stored in input (for basic data types:\n lists, tuples, sets, and dicts), calling changer on all items for which\n predicate returns true, and then replacing the original item with the\n changed item.\n\n Changes will be made in place when possible. The patched input (which may\n be a new object, or the original object, if altered in place) is returned.\n \"\"\"\n return deepPatchAltered(input, predicate, changer)[0]\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\ndef getImportableObject(importableName):\n import importlib\n if '.' in importableName:\n modulename, objName = importableName.rsplit('.', 1)\n else:\n modulename = 'builtins'\n if PY2:\n modulename = '__builtin__'\n objName = importableName\n moduleobj = importlib.import_module(modulename)\n return getattr(moduleobj, objName)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n\n\ndef convertListArgs(args):\n if len(args) == 1 and isIterable(args[0]):\n return tuple(args[0])\n return args\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n \"\"\" returns a list of a breadth first expansion of args \"\"\"\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n return tuple(result)\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef isMapping(obj):\n \"\"\"\n Returns True if an object is a mapping (dictionary) type, otherwise returns False.\n\n same as `operator.isMappingType`\n\n Returns\n -------\n bool\n \"\"\"\n return isinstance(obj, Mapping)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n\n\ndef pairIter(sequence):\n \"\"\"\n Returns an iterator over every 2 items of sequence.\n\n ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]\n\n If sequence has an odd number of items, the last item will not be returned\n in a pair.\n \"\"\"\n theIter = iter(sequence)\n return zip(theIter, theIter)\n\n\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n\n\ndef deepPatchAltered(input, predicate, changer):\n \"\"\"Like deepPatch, but returns a pair, (alteredInput, wasAltered)\"\"\"\n anyAltered = False\n if isinstance(input, dict):\n alteredKeys = {}\n for key, val in list(input.items()):\n newVal, altered = deepPatchAltered(val, predicate, changer)\n if altered:\n anyAltered = True\n input[key] = newVal\n newKey, altered = deepPatchAltered(key, predicate, changer)\n if altered:\n anyAltered = True\n alteredKeys[newKey] = input.pop(key)\n input.update(alteredKeys)\n elif isinstance(input, list):\n for i, item in enumerate(input):\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n input[i] = newItem\n elif isinstance(input, tuple):\n asList = list(input)\n newList, altered = deepPatchAltered(asList, predicate, changer)\n if altered:\n anyAltered = True\n input = tuple(newList)\n elif isinstance(input, set):\n toRemove = set()\n toAdd = set()\n for item in input:\n newItem, altered = deepPatchAltered(item, predicate, changer)\n if altered:\n anyAltered = True\n toRemove.add(item)\n toAdd.add(newItem)\n input.difference_update(toRemove)\n input.update(toAdd)\n if predicate(input):\n anyAltered = True\n input = changer(input)\n return input, anyAltered\n\n\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n \"\"\" iterator doing a postorder expansion of args \"\"\"\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getCascadingDictItem(dict, keys, default={}):\n currentDict = dict\n for key in keys[:-1]:\n if isMapping(currentDict) and key not in currentDict:\n currentDict[key] = {}\n currentDict = currentDict[key]\n try:\n return currentDict[keys[-1]]\n except KeyError:\n return default\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef izip_longest(*args, **kwds):\n fillvalue = kwds.get('fillvalue')\n\n def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):\n yield counter()\n fillers = itertools.repeat(fillvalue)\n iters = [itertools.chain(it, sentinel(), fillers) for it in args]\n try:\n for tup in zip(*iters):\n yield tup\n except IndexError:\n pass\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef iterateArgs(*args, **kwargs):\n \"\"\" Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its\n elements that will be inserted at its place in the returned arguments.\n\n By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.\n\n :Keywords:\n depth : int\n will specify the nested depth limit after which iterables are returned as they are\n\n type\n for type='list' will only expand lists, by default type='all' expands any iterable sequence\n\n postorder : bool\n will return elements depth first, from leaves to roots\n\n breadth : bool\n will return elements breadth first, roots, then first depth level, etc.\n\n For a nested list represent trees::\n\n a____b____c\n | |____d\n e____f\n |____g\n\n preorder(default) :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))\n ('a', 'b', ['c', 'd'], 'e', 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))\n ('a', 'b', 'c', 'd', 'e', 'f', 'g')\n\n postorder :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))\n ('b', ['c', 'd'], 'a', 'f', 'g', 'e')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))\n ('c', 'd', 'b', 'a', 'f', 'g', 'e')\n\n breadth :\n\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))\n ('a', 'e', 'b', ['c', 'd'], 'f', 'g')\n >>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))\n ('a', 'e', 'b', 'f', 'g', 'c', 'd')\n\n Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree\n result will be the equivalent of using a preorder iterator : iter(theTree) \"\"\"\n tpe = kwargs.get('type', 'all')\n limit = kwargs.get('limit', sys.getrecursionlimit())\n postorder = kwargs.get('postorder', False)\n breadth = kwargs.get('breadth', False)\n if tpe == 'list' or tpe == list:\n\n def _iterateArgsTest(arg):\n return type(arg) == list\n elif tpe == 'all':\n\n def _iterateArgsTest(arg):\n return isIterable(arg)\n else:\n raise ValueError('unknown expand type=%s' % tpe)\n if postorder:\n for arg in postorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n elif breadth:\n for arg in breadthIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n else:\n for arg in preorderIterArgs(limit, _iterateArgsTest, *args):\n yield arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n\n def __eq__(self, other):\n return self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n <function token>\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.oldVal)\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n <function token>\n\n def __ne__(self, other):\n return not self.oldVal == other.oldVal\n\n def __hash__(self):\n return hash(self.oldVal)\n <function token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n\n def __init__(self, oldVal):\n self.oldVal = oldVal\n <function token>\n <function token>\n\n def __hash__(self):\n return hash(self.oldVal)\n <function token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n <function token>\n <function token>\n <function token>\n\n def __hash__(self):\n return hash(self.oldVal)\n <function token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass RemovedKey(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n\n def __ne__(self, other):\n return not self.newVal == other.newVal\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n <function token>\n\n def __hash__(self):\n return hash(self.newVal)\n\n def __repr__(self):\n return '%s(%r)' % (type(self).__name__, self.newVal)\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n <function token>\n\n def __hash__(self):\n return hash(self.newVal)\n <function token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal\n <function token>\n <function token>\n <function token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AddedKey(object):\n\n def __init__(self, newVal):\n self.newVal = newVal\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AddedKey(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n <function token>\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n\n def __repr__(self):\n return '%s(%r, %r)' % (type(self).__name__, self.oldVal, self.newVal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n\n def __eq__(self, other):\n return self.newVal == other.newVal and self.oldVal == other.oldVal\n <function token>\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass ChangedKey(object):\n\n def __init__(self, oldVal, newVal):\n self.oldVal = oldVal\n self.newVal = newVal\n <function token>\n <function token>\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass ChangedKey(object):\n <function token>\n <function token>\n <function token>\n\n def __hash__(self):\n return hash((self.oldVal, self.newVal))\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass ChangedKey(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,698 |
f856b2f6ca49bfea465d73d1c45b11f52b2d4a8f
|
def isBeautifulString(inputString):
l = [0] * 26
for i in inputString:
l[ord(i) - ord('a')] += 1
j = 0
for i in range(26):
if l[i] == 0:
j = i
else:
break
c = l[j]
while j < 25 and l[j + 1] > 0:
if l[j] < l[j + 1]:
return False
j += 1
return c == len(inputString)
print(isBeautifulString("aabbb"))
|
[
"def isBeautifulString(inputString):\n l = [0] * 26\n for i in inputString:\n l[ord(i) - ord('a')] += 1\n\n j = 0\n for i in range(26):\n if l[i] == 0:\n j = i\n else:\n break\n c = l[j]\n while j < 25 and l[j + 1] > 0:\n if l[j] < l[j + 1]:\n return False\n\n j += 1\n\n\n return c == len(inputString)\n\nprint(isBeautifulString(\"aabbb\"))\n",
"def isBeautifulString(inputString):\n l = [0] * 26\n for i in inputString:\n l[ord(i) - ord('a')] += 1\n j = 0\n for i in range(26):\n if l[i] == 0:\n j = i\n else:\n break\n c = l[j]\n while j < 25 and l[j + 1] > 0:\n if l[j] < l[j + 1]:\n return False\n j += 1\n return c == len(inputString)\n\n\nprint(isBeautifulString('aabbb'))\n",
"def isBeautifulString(inputString):\n l = [0] * 26\n for i in inputString:\n l[ord(i) - ord('a')] += 1\n j = 0\n for i in range(26):\n if l[i] == 0:\n j = i\n else:\n break\n c = l[j]\n while j < 25 and l[j + 1] > 0:\n if l[j] < l[j + 1]:\n return False\n j += 1\n return c == len(inputString)\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
99,699 |
894fd35096dec5631e54881a509fe1ae018fb0fd
|
#import requests
#import codecs
# #抓網頁,
# header_data = {
# "User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
# "Accept-Language":"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
# "Cookie":"PHPSESSID=82r6aonhv5nlnn3kbadupjb675",
# "Referer":"http://teaching.bo-yuan.net/"}
# #用post丟訊息給網頁
# u1 = requests.post(
# "http://teaching.bo-yuan.net/",
# headers = header_data,
# params = {"uid":"5e9113485c1b6"},
# data = {"ex[class]":"5e81f839475de", "ex[username]":"17楊子奇", "ex[password]":"d13dc5"})
#
# # cookie 和uid會每次登入都變更,需要再次確認. 先用post丟資訊出去,再用get拿取登入後的畫面
# #用get拿取網頁資訊出現亂碼表示需要更換編碼
# u2 = requests.get(
# "http://teaching.bo-yuan.net/",
# headers = header_data)
##會遇到亂碼的時候表示編碼需要更改,中文編碼:Big5 or utf8兩種
# u2.encoding="UTF-8"
# if u2.text.find("ac1=member") != -1: #登入進去找到只有登入前沒有,登入後才有的頁面找到他的參數放進來確認
# print("登入成功")
# else:
# print("登入失敗")
#用python try 登入密碼的方式
# header_data = {
# "User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
# "Accept-Language":"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
# "Cookie":"PHPSESSID=82r6aonhv5nlnn3kbadupjb675",
# "Referer":"http://teaching.bo-yuan.net/"}
#
# for psw in range(0, 9999,1):
# u1 = requests.post(
# "http://teaching.bo-yuan.net/",
# headers = header_data,
# params = {"uid":"5e9113485c1b6"},
# data = {"ex[class]":"5e81f839475de", "ex[username]":"99測試", "ex[password]": psw})
#
# u2 = requests.get(
# "http://teaching.bo-yuan.net/",
# headers = header_data)
#
# u2.encoding="UTF-8"
# if u2.text.find("ac1=member") != -1:
# print("登入成功, 密碼是:", psw)
# break
# else:
# print("登入失敗")
# #把csv的檔案讀取成list
# import csv
# r = list(csv.reader(codecs.open("自來水水質抽驗結果(106年1月).csv", "r", "utf8")))
# for i in r:
# print(i)
# import csv, io
# u3 = requests.get("https://tppkl.blob.core.windows.net/blobfs/TaipeiTree.csv",
# headers = {
# "User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
# "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7"
# })
# print(u3.text)
# r = list(csv.reader(io.StringIO(u3.text)))
# for i in r:
# print(r)
#json資料轉換
# import json,requests
# u4 = requests.get("https://data.taipei/api/getDatasetInfo/downloadResource",
# headers = {
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
# "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7"
# },
#
# params = {
# "id":"ac589468-529b-4636-a9b2-ab57ae41cbcb",
# "rid":"24c9f8fe-88db-4a6e-895c-498fbc94df94"
# })
# # json.loads是把json檔案開啟成為list檔案, list理面是字典
# d = json.loads(u4.text)
# for i in d:
# print(i["o_tlc_agency_name"], i["o_tlc_agency_category"], i["o_tlc_agency_categorychild"])
# #把網站上的API 位址貼上,並換上參數,limit在網頁上可以直接操作,也可加入參數中調整,或調整成根據輸入顯是結果
# import json,requests
# u5 = requests.get("https://data.taipei/opendata/datalist/apiAccess",
# headers = {
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
# "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7"
# },
#
# params = {
# "scope":"resourceAquire",
# "rid":"24c9f8fe-88db-4a6e-895c-498fbc94df94",
# "limit": input("請輸入要查看幾筆資料:")
# })
# # json.loads是把json檔案開啟成為list檔案, list理面是字典
# d = json.loads(u5.text)
# print("========================")
# for i in d["result"]["results"]:
# print(i["o_tlc_agency_categorychild"], i["o_tlc_agency_address"])
# #testbymyself csv
# import csv, requests, io
# u6 = requests.get("https://data.taipei/api/getDatasetInfo/downloadResource",
# params= {
# "id":"262e80cf-579c-4bfb-ba73-31621bc84616",
# "rid":"f8cd1bf4-55db-4566-a2d6-f53a8cf446bb"
# })
# r = list(csv.reader(io.StringIO(u6.text)))
# for i in r:
# print(i[1], i[2], i[3])
#json
# import requests, json, prettytable
# u7 = requests.get(" https://data.taipei/api/getDatasetInfo/downloadResource",
# params={
# "id":"262e80cf-579c-4bfb-ba73-31621bc84616",
# "rid":"6af9f68b-b9e8-4ce4-ba29-c1038c556cd8"
# })
# r = json.loads(u7.text)
# t = prettytable.PrettyTable(["燈號", "地區", "燈種", "距離"]) #把資料裝入prettytable裏面
# for i in r:
# t.add_row([i["LIGHTID"], i["Dist"], i["LightKind1"], i["LightWatt1"]])
# print(t)
# #中國信託網頁,資料並非在網址裏面, 會由動態載入方式處理
# import requests
#
# u8 = requests.post(
# "https://www.ctbcbank.com/IB/api/adapters/IB_Adapter/resource/preLogin",
# headers = {
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
# "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
# "Connection": "keep-alive",
# "Content-Length": "495",
# "Content-Type": "application/json",
# "Cookie": "CTBC_ctbcbank_dap=!/Ek7q9Bq+97IUF9b2FCKhpyPkKrRw+WKiNysKyyh9KQIIidQzzdNUE79LTwi1H1eb9V6PyT7AQu97Q==; pzqXflQLz5wTSg5443S=PWZicSlpBae6xVzN30P9tR9Rf07A_jz0ZyuCYAwGHCtJgEpR4fPpmpmhgs_udtbq; BIGipServer~EBMW~POOL_EBMW_WEB=!4tEBp+EzpIXGd7db2FCKhpyPkKrRw0emjjaKd/vntI5G0+gr3z/mRSqIlx37vFvksgPEu6YIhh9v/w==; TS01a2d410=01c2a9c2b982fd4bff8a68c708615a34323fd4df21b7a813340b659a3065265e03c45c29c5ac7fda2708e99f687fb89028e17a46bc; ngsessionid=1586583582641; v1st=E6F52D56D95FF2C2; BIGipServer~EBMW~POOL_EBMW_TAG=!ZJJMQAxsfMnor+hb2FCKhpyPkKrRw6ReBuwhmrqmHQdI/tmRW700k4+/4dCFk5LHpuzCnjwMdNW0AQ==; TS0152eaef=01c2a9c2b982fd4bff8a68c708615a34323fd4df21b7a813340b659a3065265e03c45c29c5ac7fda2708e99f687fb89028e17a46bc; s_fid=74BA97DEFE52C0AA-20D658B380139084; s_cc=true; JSESSIONID=0000pVGyIuMGsbJ5c1KX0vq2F8d:-1; EBMWSID=FBdnwDClYYTT5zhRWusNMHwhdYHaqT4HK1J7XBU92xmynLt47OCE!-864382513; BIGipServer~EBMW~POOL_EBMWIB_AP=!aTFrjez9QJJE5rNb2FCKhpyPkKrRw2IfAnvwXVUHoXir90Wym87LasTUWDZsqoMkCi9pv7PeDnwrnLo=; _ga=GA1.2.478375549.1586583592; _gid=GA1.2.527796260.1586583592; _gat_UA-136408356-1=1; _gat_UA-136403503-1=1; _gat_UA-148818971-1=1; _gat_UA-135916138-1=1; _gat_UA-135916138-2=1; pzqXflQLz5wTSg5443T=1NuKrkgrrMeBpUE2EOo7e84FAr_28dQ41LDN2hbM2Nc.JHimgbO1IH0uNWq9Agg0slWStN8p6HRMCNTWAxU6FlRsdNuYTPYWmZxZ0xnJcCvTjHxnddFbZrGlC8SeRK8DwgSGLNJbWCuexmSM6lY.ouh1yJnYMyyb3L5HMMk31ptj9jdgJ1PsPDbiJU5NuphOkWDBR8ppFNHUiBTRPOggF03.QVUE91i2e0BIULQxz0vhxJd_JAscu3QUoa8Rfmh2_FbcNX.GdI1m34wKzHEKsQjIRTRQkexVrw9EseyCvhJW6CA; BIGipServer~RTDS~POOL_RTDS_9080=!dyvkNbgNymWm0tWnphmIOaVF7qu+1OLDrGM2G0HEL8jogNWdapthYTzcWDOCOibvz7CjeqIHZvbdxA==",
# "Host": "www.ctbcbank.com",
# "Origin": "https://www.ctbcbank.com",
# "Referer": "https://www.ctbcbank.com/twrbo/zh_tw/index/h_locate_index/h_locate_inquiry.html",
# "Sec-Fetch-Dest": "empty",
# "Sec-Fetch-Mode": "cors",
# "Sec-Fetch-Site": "same-origin",
# "x-auth-token": "2f6dafdc-154a-4108-b23a-9c3b6374514a",
# "X-Channel-Id": "EBMW_WEB_O",
# "X-Requested-With": "MFPAsync"
# },#params放的是網址上面問號以後的參數,美個參數用&分隔
# params = {
# "IIhfvu":"1qxEbjcIbnoSC5n4BtenF6rl6Lq4yKb2TrZ6cPQmcqgTjb0MIHRWgbizi2__6pcj0zCBzq9HAbOmpqH16GzNezOJ3zskQVFNRx0Zq3wro3gsMl9mIpBJsuzhb0EKr_rKM.8CEVOvy3E1IYViF8Aix5RzouYHAkKTI6HPqDp60U5aBxiNWN8wMS8.tdyiG4sbSvC7YFh5cTwGQXGXRW7n_XXaIdCwGA7HQgK0qRkk3uJJItmC1H5h_smEpGlKF9JJuNjAZg7xddyzQOqhbTV4W0bs92Pu6mDtIIxjrzt7STbimpxc6v2cDW3dzWQaeYobYsp5w1FwJYw5Jwx.DJX2brCfmnH_J3w6bCzWqKExzfeNj9V5NSVxoUUAk57cSEbwTPCFSLEHib5u_GzfFFSWNng7nbwko27JGSiW"
# },#從chrome裡面的Response headers找
# data = {
# "deviceIxd": "none",
# "trackingIxd": "d6b0f2c2-4afc-4d1a-8d18-b172728637bf",
# "txnIxd": "76f6126a-76c4-4a1a-9f74-ed0128fe95bd",
# "model": "chrome",
# "platform": "windows7",
# "ersion": "6.1",
# "runtime": "chrome",
# "runtimeVer": 80,
# "network": "unknown",
# "appVer": "5.01.18",
# "clientNo": "1586583845836",
# "clientTime": 1586583850081,
# "token": "7a85de18-bae5-4bf5-bc60-03eb8eb16d6b",
# "locale": "zh_TW",
# "fromSys": "1",
# "seed": "20200411133940891000389520621406",
# "deviceToken": "none",
# "resource": "/twrbo-general/qu002/010",
# "rqData": {}
# }
# )
# print(u8.status_code) #顯示 400 表示伺服器不理會我的問題
# #華南銀行
# import requests, codecs
#
# u9 = requests.get(
# "https://www.hncb.com.tw/hncb/XML/Taiwan.xml",
# headers = {
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36}",
# "Sec-Fetch-Dest": "empty",
# "Referer": "https://www.hncb.com.tw/wps/portal/HNCB/branches"}
#
# )
# print(u9.status_code) #確定可以存取
# with codecs.open("2020.04.11.xml", "w", "UTF-8") as f:
# f.write(u9.text)
# #beautifulsoup函式解析讀取出來是網頁資訊
# from bs4 import BeautifulSoup
# import requests, codecs
# u10 = requests.get(
# "https://www.taiwan.net.tw/m1.aspx",
# headers = {
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
# "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
# "Cookie": "ASP.NET_SessionId=pxqsxdgpvft3zelzrdxxxcsa; TwSessID=; _ga=GA1.3.1470569981.1586590876; _gid=GA1.3.1340334767.1586590876; _gat_gtag_UA_5278761_9=1",
# },
# params= {
# "sNo":"0001001"
# })
# b1 = BeautifulSoup(u10.text, "html.parser")
# # a1 = b1.find_all("a", {"class":"columnBlock-title"}) #抓標題
# # for i in a1:
# # print(i.text)
# # a2 = b1.find_all("span", {"class":"date"})
# # for i in a2:
# # print(i.text)
#
# #把每一頁都存檔
# fn =1
# a3 = b1.find_all("div", {"class":"columnBlock-info"})
# for i in a3:
# title = i.find("a", {"class":"columnBlock-title"})
# date = i.find("span", {"class":"date"})
# if title.attrs["href"].find("m1.aspx") != -1:
# r2 = requests.get(
# "https://www.taiwan.net.tw/" + title.attrs["href"],
# headers={
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
# "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
# "Cookie": "ASP.NET_SessionId=pxqsxdgpvft3zelzrdxxxcsa; TwSessID=; _ga=GA1.3.1470569981.1586590876; _gid=GA1.3.1340334767.1586590876",
# }
# )
# b2 = BeautifulSoup(r2.text, "html.parser")
# with codecs.open("html/"+ str(f(n))+ ".txt", "w", "UTF-8") as f:
# f.write(title.text +"\r\n")
# f.write(date.text + "\r\n\r\n")
# f.write(b2.find("div", {"class":"content"}).find("p").text)
# fn += 1
#104網頁查詢
import requests, prettytable
from bs4 import BeautifulSoup
keyword = input("請輸入要搜尋的關鍵字:")
t = prettytable.PrettyTable(["公司名稱", "職缺名稱"], encoding="UTF-8")
for page in range(1, 3, 1):
u11 = requests.get(
"https://www.104.com.tw/jobs/search/",
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
"Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7"
},
params= {
"keyword":keyword,
"order":1,
"jobsource":"2018indexpoc",
"ro": 0,
"asc":0,
"page":page,
"mode":"s"
})
b1 = BeautifulSoup(u11.text, "html.parser")
a1 = b1.find_all("article", {"class":"job-list-item"})
for i in a1:
t.add_row([i.attrs["data-cust-name"], i.attrs["data-job-name"]])
print(t)
|
[
"#import requests\n#import codecs\n\n# #抓網頁,\n# header_data = {\n# \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n# \"Accept-Language\":\"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\",\n# \"Cookie\":\"PHPSESSID=82r6aonhv5nlnn3kbadupjb675\",\n# \"Referer\":\"http://teaching.bo-yuan.net/\"}\n# #用post丟訊息給網頁\n# u1 = requests.post(\n# \"http://teaching.bo-yuan.net/\",\n# headers = header_data,\n# params = {\"uid\":\"5e9113485c1b6\"},\n# data = {\"ex[class]\":\"5e81f839475de\", \"ex[username]\":\"17楊子奇\", \"ex[password]\":\"d13dc5\"})\n#\n# # cookie 和uid會每次登入都變更,需要再次確認. 先用post丟資訊出去,再用get拿取登入後的畫面\n# #用get拿取網頁資訊出現亂碼表示需要更換編碼\n# u2 = requests.get(\n# \"http://teaching.bo-yuan.net/\",\n# headers = header_data)\n##會遇到亂碼的時候表示編碼需要更改,中文編碼:Big5 or utf8兩種\n# u2.encoding=\"UTF-8\" \n# if u2.text.find(\"ac1=member\") != -1: #登入進去找到只有登入前沒有,登入後才有的頁面找到他的參數放進來確認\n# print(\"登入成功\")\n# else:\n# print(\"登入失敗\")\n\n#用python try 登入密碼的方式\n\n# header_data = {\n# \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n# \"Accept-Language\":\"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\",\n# \"Cookie\":\"PHPSESSID=82r6aonhv5nlnn3kbadupjb675\",\n# \"Referer\":\"http://teaching.bo-yuan.net/\"}\n#\n# for psw in range(0, 9999,1):\n# u1 = requests.post(\n# \"http://teaching.bo-yuan.net/\",\n# headers = header_data,\n# params = {\"uid\":\"5e9113485c1b6\"},\n# data = {\"ex[class]\":\"5e81f839475de\", \"ex[username]\":\"99測試\", \"ex[password]\": psw})\n#\n# u2 = requests.get(\n# \"http://teaching.bo-yuan.net/\",\n# headers = header_data)\n#\n# u2.encoding=\"UTF-8\"\n# if u2.text.find(\"ac1=member\") != -1:\n# print(\"登入成功, 密碼是:\", psw)\n# break\n# else:\n# print(\"登入失敗\")\n\n# #把csv的檔案讀取成list\n# import csv\n# r = list(csv.reader(codecs.open(\"自來水水質抽驗結果(106年1月).csv\", \"r\", \"utf8\")))\n# for i in r:\n# print(i)\n# import csv, io\n# u3 = requests.get(\"https://tppkl.blob.core.windows.net/blobfs/TaipeiTree.csv\",\n# headers = {\n# \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\",\n# \"Accept-Language\": \"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\"\n# })\n# print(u3.text)\n# r = list(csv.reader(io.StringIO(u3.text)))\n# for i in r:\n# print(r)\n\n#json資料轉換\n# import json,requests\n# u4 = requests.get(\"https://data.taipei/api/getDatasetInfo/downloadResource\",\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\",\n# \"Accept-Language\": \"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\"\n# },\n#\n# params = {\n# \"id\":\"ac589468-529b-4636-a9b2-ab57ae41cbcb\",\n# \"rid\":\"24c9f8fe-88db-4a6e-895c-498fbc94df94\"\n# })\n# # json.loads是把json檔案開啟成為list檔案, list理面是字典\n# d = json.loads(u4.text)\n# for i in d:\n# print(i[\"o_tlc_agency_name\"], i[\"o_tlc_agency_category\"], i[\"o_tlc_agency_categorychild\"])\n\n# #把網站上的API 位址貼上,並換上參數,limit在網頁上可以直接操作,也可加入參數中調整,或調整成根據輸入顯是結果\n# import json,requests\n# u5 = requests.get(\"https://data.taipei/opendata/datalist/apiAccess\",\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\",\n# \"Accept-Language\": \"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\"\n# },\n#\n# params = {\n# \"scope\":\"resourceAquire\",\n# \"rid\":\"24c9f8fe-88db-4a6e-895c-498fbc94df94\",\n# \"limit\": input(\"請輸入要查看幾筆資料:\")\n# })\n# # json.loads是把json檔案開啟成為list檔案, list理面是字典\n# d = json.loads(u5.text)\n# print(\"========================\")\n# for i in d[\"result\"][\"results\"]:\n# print(i[\"o_tlc_agency_categorychild\"], i[\"o_tlc_agency_address\"])\n\n# #testbymyself csv\n# import csv, requests, io\n# u6 = requests.get(\"https://data.taipei/api/getDatasetInfo/downloadResource\",\n# params= {\n# \"id\":\"262e80cf-579c-4bfb-ba73-31621bc84616\",\n# \"rid\":\"f8cd1bf4-55db-4566-a2d6-f53a8cf446bb\"\n# })\n# r = list(csv.reader(io.StringIO(u6.text)))\n# for i in r:\n# print(i[1], i[2], i[3])\n\n#json\n# import requests, json, prettytable\n# u7 = requests.get(\"\thttps://data.taipei/api/getDatasetInfo/downloadResource\",\n# params={\n# \"id\":\"262e80cf-579c-4bfb-ba73-31621bc84616\",\n# \"rid\":\"6af9f68b-b9e8-4ce4-ba29-c1038c556cd8\"\n# })\n# r = json.loads(u7.text)\n# t = prettytable.PrettyTable([\"燈號\", \"地區\", \"燈種\", \"距離\"]) #把資料裝入prettytable裏面\n# for i in r:\n# t.add_row([i[\"LIGHTID\"], i[\"Dist\"], i[\"LightKind1\"], i[\"LightWatt1\"]])\n# print(t)\n\n# #中國信託網頁,資料並非在網址裏面, 會由動態載入方式處理\n# import requests\n#\n# u8 = requests.post(\n# \"https://www.ctbcbank.com/IB/api/adapters/IB_Adapter/resource/preLogin\",\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\",\n# \"Accept-Language\": \"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\",\n# \"Connection\": \"keep-alive\",\n# \"Content-Length\": \"495\",\n# \"Content-Type\": \"application/json\",\n# \"Cookie\": \"CTBC_ctbcbank_dap=!/Ek7q9Bq+97IUF9b2FCKhpyPkKrRw+WKiNysKyyh9KQIIidQzzdNUE79LTwi1H1eb9V6PyT7AQu97Q==; pzqXflQLz5wTSg5443S=PWZicSlpBae6xVzN30P9tR9Rf07A_jz0ZyuCYAwGHCtJgEpR4fPpmpmhgs_udtbq; BIGipServer~EBMW~POOL_EBMW_WEB=!4tEBp+EzpIXGd7db2FCKhpyPkKrRw0emjjaKd/vntI5G0+gr3z/mRSqIlx37vFvksgPEu6YIhh9v/w==; TS01a2d410=01c2a9c2b982fd4bff8a68c708615a34323fd4df21b7a813340b659a3065265e03c45c29c5ac7fda2708e99f687fb89028e17a46bc; ngsessionid=1586583582641; v1st=E6F52D56D95FF2C2; BIGipServer~EBMW~POOL_EBMW_TAG=!ZJJMQAxsfMnor+hb2FCKhpyPkKrRw6ReBuwhmrqmHQdI/tmRW700k4+/4dCFk5LHpuzCnjwMdNW0AQ==; TS0152eaef=01c2a9c2b982fd4bff8a68c708615a34323fd4df21b7a813340b659a3065265e03c45c29c5ac7fda2708e99f687fb89028e17a46bc; s_fid=74BA97DEFE52C0AA-20D658B380139084; s_cc=true; JSESSIONID=0000pVGyIuMGsbJ5c1KX0vq2F8d:-1; EBMWSID=FBdnwDClYYTT5zhRWusNMHwhdYHaqT4HK1J7XBU92xmynLt47OCE!-864382513; BIGipServer~EBMW~POOL_EBMWIB_AP=!aTFrjez9QJJE5rNb2FCKhpyPkKrRw2IfAnvwXVUHoXir90Wym87LasTUWDZsqoMkCi9pv7PeDnwrnLo=; _ga=GA1.2.478375549.1586583592; _gid=GA1.2.527796260.1586583592; _gat_UA-136408356-1=1; _gat_UA-136403503-1=1; _gat_UA-148818971-1=1; _gat_UA-135916138-1=1; _gat_UA-135916138-2=1; pzqXflQLz5wTSg5443T=1NuKrkgrrMeBpUE2EOo7e84FAr_28dQ41LDN2hbM2Nc.JHimgbO1IH0uNWq9Agg0slWStN8p6HRMCNTWAxU6FlRsdNuYTPYWmZxZ0xnJcCvTjHxnddFbZrGlC8SeRK8DwgSGLNJbWCuexmSM6lY.ouh1yJnYMyyb3L5HMMk31ptj9jdgJ1PsPDbiJU5NuphOkWDBR8ppFNHUiBTRPOggF03.QVUE91i2e0BIULQxz0vhxJd_JAscu3QUoa8Rfmh2_FbcNX.GdI1m34wKzHEKsQjIRTRQkexVrw9EseyCvhJW6CA; BIGipServer~RTDS~POOL_RTDS_9080=!dyvkNbgNymWm0tWnphmIOaVF7qu+1OLDrGM2G0HEL8jogNWdapthYTzcWDOCOibvz7CjeqIHZvbdxA==\",\n# \"Host\": \"www.ctbcbank.com\",\n# \"Origin\": \"https://www.ctbcbank.com\",\n# \"Referer\": \"https://www.ctbcbank.com/twrbo/zh_tw/index/h_locate_index/h_locate_inquiry.html\",\n# \"Sec-Fetch-Dest\": \"empty\",\n# \"Sec-Fetch-Mode\": \"cors\",\n# \"Sec-Fetch-Site\": \"same-origin\",\n# \"x-auth-token\": \"2f6dafdc-154a-4108-b23a-9c3b6374514a\",\n# \"X-Channel-Id\": \"EBMW_WEB_O\",\n# \"X-Requested-With\": \"MFPAsync\"\n# },#params放的是網址上面問號以後的參數,美個參數用&分隔\n# params = {\n# \"IIhfvu\":\"1qxEbjcIbnoSC5n4BtenF6rl6Lq4yKb2TrZ6cPQmcqgTjb0MIHRWgbizi2__6pcj0zCBzq9HAbOmpqH16GzNezOJ3zskQVFNRx0Zq3wro3gsMl9mIpBJsuzhb0EKr_rKM.8CEVOvy3E1IYViF8Aix5RzouYHAkKTI6HPqDp60U5aBxiNWN8wMS8.tdyiG4sbSvC7YFh5cTwGQXGXRW7n_XXaIdCwGA7HQgK0qRkk3uJJItmC1H5h_smEpGlKF9JJuNjAZg7xddyzQOqhbTV4W0bs92Pu6mDtIIxjrzt7STbimpxc6v2cDW3dzWQaeYobYsp5w1FwJYw5Jwx.DJX2brCfmnH_J3w6bCzWqKExzfeNj9V5NSVxoUUAk57cSEbwTPCFSLEHib5u_GzfFFSWNng7nbwko27JGSiW\"\n# },#從chrome裡面的Response headers找\n# data = {\n# \"deviceIxd\": \"none\",\n# \"trackingIxd\": \"d6b0f2c2-4afc-4d1a-8d18-b172728637bf\",\n# \"txnIxd\": \"76f6126a-76c4-4a1a-9f74-ed0128fe95bd\",\n# \"model\": \"chrome\",\n# \"platform\": \"windows7\",\n# \"ersion\": \"6.1\",\n# \"runtime\": \"chrome\",\n# \"runtimeVer\": 80,\n# \"network\": \"unknown\",\n# \"appVer\": \"5.01.18\",\n# \"clientNo\": \"1586583845836\",\n# \"clientTime\": 1586583850081,\n# \"token\": \"7a85de18-bae5-4bf5-bc60-03eb8eb16d6b\",\n# \"locale\": \"zh_TW\",\n# \"fromSys\": \"1\",\n# \"seed\": \"20200411133940891000389520621406\",\n# \"deviceToken\": \"none\",\n# \"resource\": \"/twrbo-general/qu002/010\",\n# \"rqData\": {}\n# }\n# )\n# print(u8.status_code) #顯示 400 表示伺服器不理會我的問題\n\n# #華南銀行\n# import requests, codecs\n#\n# u9 = requests.get(\n# \"https://www.hncb.com.tw/hncb/XML/Taiwan.xml\",\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36}\",\n# \"Sec-Fetch-Dest\": \"empty\",\n# \"Referer\": \"https://www.hncb.com.tw/wps/portal/HNCB/branches\"}\n#\n# )\n# print(u9.status_code) #確定可以存取\n# with codecs.open(\"2020.04.11.xml\", \"w\", \"UTF-8\") as f:\n# f.write(u9.text)\n\n# #beautifulsoup函式解析讀取出來是網頁資訊\n# from bs4 import BeautifulSoup\n# import requests, codecs\n# u10 = requests.get(\n# \"https://www.taiwan.net.tw/m1.aspx\",\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\",\n# \"Accept-Language\": \"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\",\n# \"Cookie\": \"ASP.NET_SessionId=pxqsxdgpvft3zelzrdxxxcsa; TwSessID=; _ga=GA1.3.1470569981.1586590876; _gid=GA1.3.1340334767.1586590876; _gat_gtag_UA_5278761_9=1\",\n# },\n# params= {\n# \"sNo\":\"0001001\"\n# })\n# b1 = BeautifulSoup(u10.text, \"html.parser\")\n# # a1 = b1.find_all(\"a\", {\"class\":\"columnBlock-title\"}) #抓標題\n# # for i in a1:\n# # print(i.text)\n# # a2 = b1.find_all(\"span\", {\"class\":\"date\"})\n# # for i in a2:\n# # print(i.text)\n#\n# #把每一頁都存檔\n# fn =1\n# a3 = b1.find_all(\"div\", {\"class\":\"columnBlock-info\"})\n# for i in a3:\n# title = i.find(\"a\", {\"class\":\"columnBlock-title\"})\n# date = i.find(\"span\", {\"class\":\"date\"})\n# if title.attrs[\"href\"].find(\"m1.aspx\") != -1:\n# r2 = requests.get(\n# \"https://www.taiwan.net.tw/\" + title.attrs[\"href\"],\n# headers={\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\",\n# \"Accept-Language\": \"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\",\n# \"Cookie\": \"ASP.NET_SessionId=pxqsxdgpvft3zelzrdxxxcsa; TwSessID=; _ga=GA1.3.1470569981.1586590876; _gid=GA1.3.1340334767.1586590876\",\n# }\n# )\n# b2 = BeautifulSoup(r2.text, \"html.parser\")\n# with codecs.open(\"html/\"+ str(f(n))+ \".txt\", \"w\", \"UTF-8\") as f:\n# f.write(title.text +\"\\r\\n\")\n# f.write(date.text + \"\\r\\n\\r\\n\")\n# f.write(b2.find(\"div\", {\"class\":\"content\"}).find(\"p\").text)\n# fn += 1\n\n#104網頁查詢\nimport requests, prettytable\nfrom bs4 import BeautifulSoup\nkeyword = input(\"請輸入要搜尋的關鍵字:\")\nt = prettytable.PrettyTable([\"公司名稱\", \"職缺名稱\"], encoding=\"UTF-8\")\nfor page in range(1, 3, 1):\n u11 = requests.get(\n \"https://www.104.com.tw/jobs/search/\",\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36\",\n \"Accept-Language\": \"zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7\"\n },\n params= {\n \"keyword\":keyword,\n \"order\":1,\n \"jobsource\":\"2018indexpoc\",\n \"ro\": 0,\n \"asc\":0,\n \"page\":page,\n \"mode\":\"s\"\n })\n b1 = BeautifulSoup(u11.text, \"html.parser\")\n a1 = b1.find_all(\"article\", {\"class\":\"job-list-item\"})\n for i in a1:\n t.add_row([i.attrs[\"data-cust-name\"], i.attrs[\"data-job-name\"]])\nprint(t)",
"import requests, prettytable\nfrom bs4 import BeautifulSoup\nkeyword = input('請輸入要搜尋的關鍵字:')\nt = prettytable.PrettyTable(['公司名稱', '職缺名稱'], encoding='UTF-8')\nfor page in range(1, 3, 1):\n u11 = requests.get('https://www.104.com.tw/jobs/search/', headers={\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'\n , 'Accept-Language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7'}, params\n ={'keyword': keyword, 'order': 1, 'jobsource': '2018indexpoc', 'ro':\n 0, 'asc': 0, 'page': page, 'mode': 's'})\n b1 = BeautifulSoup(u11.text, 'html.parser')\n a1 = b1.find_all('article', {'class': 'job-list-item'})\n for i in a1:\n t.add_row([i.attrs['data-cust-name'], i.attrs['data-job-name']])\nprint(t)\n",
"<import token>\nkeyword = input('請輸入要搜尋的關鍵字:')\nt = prettytable.PrettyTable(['公司名稱', '職缺名稱'], encoding='UTF-8')\nfor page in range(1, 3, 1):\n u11 = requests.get('https://www.104.com.tw/jobs/search/', headers={\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'\n , 'Accept-Language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7'}, params\n ={'keyword': keyword, 'order': 1, 'jobsource': '2018indexpoc', 'ro':\n 0, 'asc': 0, 'page': page, 'mode': 's'})\n b1 = BeautifulSoup(u11.text, 'html.parser')\n a1 = b1.find_all('article', {'class': 'job-list-item'})\n for i in a1:\n t.add_row([i.attrs['data-cust-name'], i.attrs['data-job-name']])\nprint(t)\n",
"<import token>\n<assignment token>\nfor page in range(1, 3, 1):\n u11 = requests.get('https://www.104.com.tw/jobs/search/', headers={\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'\n , 'Accept-Language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7'}, params\n ={'keyword': keyword, 'order': 1, 'jobsource': '2018indexpoc', 'ro':\n 0, 'asc': 0, 'page': page, 'mode': 's'})\n b1 = BeautifulSoup(u11.text, 'html.parser')\n a1 = b1.find_all('article', {'class': 'job-list-item'})\n for i in a1:\n t.add_row([i.attrs['data-cust-name'], i.attrs['data-job-name']])\nprint(t)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.