code (string, lengths 13 to 6.09M) | order_type (string, 2 values) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---|
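Each row below is one record, its four cells separated by `|` lines in the column order above: the `code` sample itself; its `order_type` (both observed values, `normal` and `flexible`, appear below); an `original_example` dict carrying up to five progressively unmasked versions of the code as `step-1` through `step-5` (`null` where a step is absent, with `<mask token>` marking elided spans) together with their `step-ids`; and the same ids repeated as the `step_ids` column. As a minimal sketch of how one might walk a row's steps, assuming only the shape visible in the sample rows (the helper name `iter_steps` is hypothetical, not part of any dataset API):

```python
from typing import Iterator, Tuple

def iter_steps(example: dict) -> Iterator[Tuple[int, str]]:
    """Yield (step_id, code) pairs for the non-null steps of one row's
    original_example dict, assuming keys "step-1".."step-5" hold code
    strings or None and "step-ids" lists one id per non-null step."""
    present = [example[f"step-{i}"] for i in range(1, 6)
               if example.get(f"step-{i}") is not None]
    # In every sample row below, len(step-ids) equals the number of
    # non-null steps, so a plain zip pairs them up in order.
    yield from zip(example["step-ids"], present)

# For the first row below (blob_id e736991f...), this yields
# (0, "<mask token>\n"), (1, "<mask token>\napp = Flask(__name__)\n..."),
# and (2, the fully unmasked source).
```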
from flask import *
app = Flask(__name__)
from app import views
from app import admin_views
from app import usr_reg
from app import cookie
from app import db_connect
|
normal
|
{
"blob_id": "e736991f364ba9ff709348e4b1f612b1e9673281",
"index": 252,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp = Flask(__name__)\n<mask token>\n",
"step-3": "from flask import *\napp = Flask(__name__)\nfrom app import views\nfrom app import admin_views\nfrom app import usr_reg\nfrom app import cookie\nfrom app import db_connect\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import re
with open('input.txt') as f:
input_file = f.readlines()
input_file = [x.strip() for x in input_file]
def check_passport(text):
arr = text.split()
dct = {}
for elem in arr:
key = elem.split(":")[0]
val = elem.split(":")[1]
dct[key] = val
try:
if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']) > 2002:
print("byr invalid")
return False
if len(dct['iyr']) !=4 or int(dct['iyr']) < 2010 or int(dct['iyr']) > 2030:
print("iyr invalid")
return False
if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']) > 2030:
print("eyr invalid")
return False
if dct['hgt'][-2:] == 'in':
if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:
print("hgt invalid")
return False
elif dct['hgt'][-2:] == 'cm':
if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:
print("hgt invalid")
return False
else:
print("hgt invalid")
return False
if dct['hcl'][0] != "#" or not re.compile("[0-9a-f]{6}").fullmatch(dct['hcl'][1:]):
print("hcl invalid")
return False
ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if not any(dct['ecl'] == option for option in ecl_options):
print("ecl invalid")
return False
if not re.compile("[0-9]{9}").fullmatch(dct['pid']):
print("pid invalid")
return False
return True
except KeyError as e:
print("Key error: " + str(e))
return False
# if ("byr:" in text and "iyr:" in text and "eyr:" in text and "hgt:" in text and "hcl:" in text and "ecl:" in text and "pid:" in text):
# return True
# else:
# return False
grouped_input = []
curr = ""
for i in input_file:
if i != "":
curr += " " + i
else:
grouped_input.append(curr[1:])
curr = ""
count = 0
for i in range(0, len(grouped_input)):
print(str(check_passport(grouped_input[i])) + " " + grouped_input[i])
if check_passport(grouped_input[i]):
count += 1
print(count)
|
normal
|
{
"blob_id": "166329c967e83806e3482179a56ac7e5541d5010",
"index": 1589,
"step-1": "<mask token>\n\n\ndef check_passport(text):\n arr = text.split()\n dct = {}\n for elem in arr:\n key = elem.split(':')[0]\n val = elem.split(':')[1]\n dct[key] = val\n try:\n if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']\n ) > 2002:\n print('byr invalid')\n return False\n if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']\n ) > 2030:\n print('iyr invalid')\n return False\n if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']\n ) > 2030:\n print('eyr invalid')\n return False\n if dct['hgt'][-2:] == 'in':\n if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:\n print('hgt invalid')\n return False\n elif dct['hgt'][-2:] == 'cm':\n if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:\n print('hgt invalid')\n return False\n else:\n print('hgt invalid')\n return False\n if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct\n ['hcl'][1:]):\n print('hcl invalid')\n return False\n ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n if not any(dct['ecl'] == option for option in ecl_options):\n print('ecl invalid')\n return False\n if not re.compile('[0-9]{9}').fullmatch(dct['pid']):\n print('pid invalid')\n return False\n return True\n except KeyError as e:\n print('Key error: ' + str(e))\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('input.txt') as f:\n input_file = f.readlines()\n<mask token>\n\n\ndef check_passport(text):\n arr = text.split()\n dct = {}\n for elem in arr:\n key = elem.split(':')[0]\n val = elem.split(':')[1]\n dct[key] = val\n try:\n if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']\n ) > 2002:\n print('byr invalid')\n return False\n if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']\n ) > 2030:\n print('iyr invalid')\n return False\n if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']\n ) > 2030:\n print('eyr invalid')\n return False\n if dct['hgt'][-2:] == 'in':\n if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:\n print('hgt invalid')\n return False\n elif dct['hgt'][-2:] == 'cm':\n if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:\n print('hgt invalid')\n return False\n else:\n print('hgt invalid')\n return False\n if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct\n ['hcl'][1:]):\n print('hcl invalid')\n return False\n ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n if not any(dct['ecl'] == option for option in ecl_options):\n print('ecl invalid')\n return False\n if not re.compile('[0-9]{9}').fullmatch(dct['pid']):\n print('pid invalid')\n return False\n return True\n except KeyError as e:\n print('Key error: ' + str(e))\n return False\n\n\n<mask token>\nfor i in input_file:\n if i != '':\n curr += ' ' + i\n else:\n grouped_input.append(curr[1:])\n curr = ''\n<mask token>\nfor i in range(0, len(grouped_input)):\n print(str(check_passport(grouped_input[i])) + ' ' + grouped_input[i])\n if check_passport(grouped_input[i]):\n count += 1\nprint(count)\n",
"step-3": "<mask token>\nwith open('input.txt') as f:\n input_file = f.readlines()\ninput_file = [x.strip() for x in input_file]\n\n\ndef check_passport(text):\n arr = text.split()\n dct = {}\n for elem in arr:\n key = elem.split(':')[0]\n val = elem.split(':')[1]\n dct[key] = val\n try:\n if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']\n ) > 2002:\n print('byr invalid')\n return False\n if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']\n ) > 2030:\n print('iyr invalid')\n return False\n if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']\n ) > 2030:\n print('eyr invalid')\n return False\n if dct['hgt'][-2:] == 'in':\n if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:\n print('hgt invalid')\n return False\n elif dct['hgt'][-2:] == 'cm':\n if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:\n print('hgt invalid')\n return False\n else:\n print('hgt invalid')\n return False\n if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct\n ['hcl'][1:]):\n print('hcl invalid')\n return False\n ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n if not any(dct['ecl'] == option for option in ecl_options):\n print('ecl invalid')\n return False\n if not re.compile('[0-9]{9}').fullmatch(dct['pid']):\n print('pid invalid')\n return False\n return True\n except KeyError as e:\n print('Key error: ' + str(e))\n return False\n\n\ngrouped_input = []\ncurr = ''\nfor i in input_file:\n if i != '':\n curr += ' ' + i\n else:\n grouped_input.append(curr[1:])\n curr = ''\ncount = 0\nfor i in range(0, len(grouped_input)):\n print(str(check_passport(grouped_input[i])) + ' ' + grouped_input[i])\n if check_passport(grouped_input[i]):\n count += 1\nprint(count)\n",
"step-4": "import re\nwith open('input.txt') as f:\n input_file = f.readlines()\ninput_file = [x.strip() for x in input_file]\n\n\ndef check_passport(text):\n arr = text.split()\n dct = {}\n for elem in arr:\n key = elem.split(':')[0]\n val = elem.split(':')[1]\n dct[key] = val\n try:\n if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']\n ) > 2002:\n print('byr invalid')\n return False\n if len(dct['iyr']) != 4 or int(dct['iyr']) < 2010 or int(dct['iyr']\n ) > 2030:\n print('iyr invalid')\n return False\n if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']\n ) > 2030:\n print('eyr invalid')\n return False\n if dct['hgt'][-2:] == 'in':\n if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:\n print('hgt invalid')\n return False\n elif dct['hgt'][-2:] == 'cm':\n if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:\n print('hgt invalid')\n return False\n else:\n print('hgt invalid')\n return False\n if dct['hcl'][0] != '#' or not re.compile('[0-9a-f]{6}').fullmatch(dct\n ['hcl'][1:]):\n print('hcl invalid')\n return False\n ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n if not any(dct['ecl'] == option for option in ecl_options):\n print('ecl invalid')\n return False\n if not re.compile('[0-9]{9}').fullmatch(dct['pid']):\n print('pid invalid')\n return False\n return True\n except KeyError as e:\n print('Key error: ' + str(e))\n return False\n\n\ngrouped_input = []\ncurr = ''\nfor i in input_file:\n if i != '':\n curr += ' ' + i\n else:\n grouped_input.append(curr[1:])\n curr = ''\ncount = 0\nfor i in range(0, len(grouped_input)):\n print(str(check_passport(grouped_input[i])) + ' ' + grouped_input[i])\n if check_passport(grouped_input[i]):\n count += 1\nprint(count)\n",
"step-5": "import re\n\nwith open('input.txt') as f:\n input_file = f.readlines()\ninput_file = [x.strip() for x in input_file]\n\ndef check_passport(text):\n arr = text.split()\n dct = {}\n for elem in arr:\n key = elem.split(\":\")[0]\n val = elem.split(\":\")[1]\n dct[key] = val\n \n try:\n if len(dct['byr']) != 4 or int(dct['byr']) < 1920 or int(dct['byr']) > 2002:\n print(\"byr invalid\")\n return False\n if len(dct['iyr']) !=4 or int(dct['iyr']) < 2010 or int(dct['iyr']) > 2030:\n print(\"iyr invalid\")\n return False\n if len(dct['eyr']) != 4 or int(dct['eyr']) < 2020 or int(dct['eyr']) > 2030:\n print(\"eyr invalid\")\n return False\n if dct['hgt'][-2:] == 'in':\n if int(dct['hgt'][:-2]) < 59 or int(dct['hgt'][:-2]) > 76:\n print(\"hgt invalid\")\n return False\n elif dct['hgt'][-2:] == 'cm':\n if int(dct['hgt'][:-2]) < 150 or int(dct['hgt'][:-2]) > 193:\n print(\"hgt invalid\")\n return False\n else:\n print(\"hgt invalid\")\n return False\n\n if dct['hcl'][0] != \"#\" or not re.compile(\"[0-9a-f]{6}\").fullmatch(dct['hcl'][1:]):\n print(\"hcl invalid\")\n return False\n\n ecl_options = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n if not any(dct['ecl'] == option for option in ecl_options):\n print(\"ecl invalid\")\n return False\n if not re.compile(\"[0-9]{9}\").fullmatch(dct['pid']):\n print(\"pid invalid\")\n return False\n \n return True\n\n except KeyError as e:\n print(\"Key error: \" + str(e))\n return False\n \n \n # if (\"byr:\" in text and \"iyr:\" in text and \"eyr:\" in text and \"hgt:\" in text and \"hcl:\" in text and \"ecl:\" in text and \"pid:\" in text):\n # return True\n # else:\n # return False\n\n\ngrouped_input = []\ncurr = \"\"\nfor i in input_file:\n if i != \"\":\n curr += \" \" + i\n else:\n grouped_input.append(curr[1:])\n curr = \"\"\n\ncount = 0\nfor i in range(0, len(grouped_input)):\n print(str(check_passport(grouped_input[i])) + \" \" + grouped_input[i])\n if check_passport(grouped_input[i]):\n count += 1\n\nprint(count)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy.settings import CrawlerSettings
from scrapy import log, signals
from spiders.songspk_spider import SongsPKSpider
from scrapy.xlib.pydispatch import dispatcher
def stop_reactor():
reactor.stop()
dispatcher.connect(stop_reactor, signal=signals.spider_closed)
spider = SongsPKSpider(domain='aqaq.com')
crawler = Crawler(CrawlerSettings())
crawler.configure()
crawler.crawl(spider)
crawler.start()
log.start(loglevel=log.DEBUG)
log.msg("------------>Running reactor")
result = reactor.run()
print result
log.msg("------------>Running stoped")
|
normal
|
{
"blob_id": "0d14534b210b13ede4a687e418d05d756d221950",
"index": 3297,
"step-1": "from twisted.internet import reactor\nfrom scrapy.crawler import Crawler\nfrom scrapy.settings import CrawlerSettings\nfrom scrapy import log, signals\nfrom spiders.songspk_spider import SongsPKSpider\nfrom scrapy.xlib.pydispatch import dispatcher\n\ndef stop_reactor():\n reactor.stop()\n\ndispatcher.connect(stop_reactor, signal=signals.spider_closed)\n\nspider = SongsPKSpider(domain='aqaq.com')\ncrawler = Crawler(CrawlerSettings())\ncrawler.configure()\ncrawler.crawl(spider)\ncrawler.start()\nlog.start(loglevel=log.DEBUG)\nlog.msg(\"------------>Running reactor\")\nresult = reactor.run()\nprint result\nlog.msg(\"------------>Running stoped\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import json
import pickle
import zlib
from diskcollections.interfaces import IHandler
class PickleHandler(IHandler):
dumps = staticmethod(pickle.dumps)
loads = staticmethod(pickle.loads)
class PickleZLibHandler(IHandler):
@staticmethod
def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.
Z_DEFAULT_COMPRESSION):
pickled = pickle.dumps(obj, protocol=protocol)
compressed = zlib.compress(pickled, level)
return compressed
@staticmethod
def loads(compressed):
pickled = zlib.decompress(compressed)
obj = pickle.loads(pickled)
return obj
class JsonHandler(IHandler):
dumps = staticmethod(json.dumps)
loads = staticmethod(json.loads)
class JsonZLibHandler(IHandler):
@staticmethod
def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):
jsoned = json.dumps(obj).encode()
compressed = zlib.compress(jsoned, level)
return compressed
@staticmethod
def loads(compressed):
jsoned = zlib.decompress(compressed).decode()
obj = json.loads(jsoned)
return obj
|
normal
|
{
"blob_id": "60202758a0a42fc26dc1bca9f134a70f28967093",
"index": 2728,
"step-1": "<mask token>\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-2": "<mask token>\n\n\nclass PickleHandler(IHandler):\n <mask token>\n <mask token>\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-3": "<mask token>\n\n\nclass PickleHandler(IHandler):\n dumps = staticmethod(pickle.dumps)\n loads = staticmethod(pickle.loads)\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-4": "import json\nimport pickle\nimport zlib\nfrom diskcollections.interfaces import IHandler\n\n\nclass PickleHandler(IHandler):\n dumps = staticmethod(pickle.dumps)\n loads = staticmethod(pickle.loads)\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-5": null,
"step-ids": [
8,
9,
10,
11
]
}
|
[
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@game_blueprint.route('/<string:game_id>')
@requires_login
def game_index(game_id):
return render_template('game/game.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
game_blueprint = Blueprint('game', __name__)
@game_blueprint.route('/<string:game_id>')
@requires_login
def game_index(game_id):
return render_template('game/game.html')
<|reserved_special_token_1|>
from flask import Blueprint, render_template, request, session, url_for, redirect
from flask_socketio import join_room, leave_room, send, emit
from models.game.game import Game
from models.games.games import Games
from decorators.req_login import requires_login
game_blueprint = Blueprint('game', __name__)
@game_blueprint.route('/<string:game_id>')
@requires_login
def game_index(game_id):
return render_template('game/game.html')
|
flexible
|
{
"blob_id": "1ccb23435d8501ed82debf91bd6bf856830d01cb",
"index": 6063,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@game_blueprint.route('/<string:game_id>')\n@requires_login\ndef game_index(game_id):\n return render_template('game/game.html')\n",
"step-3": "<mask token>\ngame_blueprint = Blueprint('game', __name__)\n\n\n@game_blueprint.route('/<string:game_id>')\n@requires_login\ndef game_index(game_id):\n return render_template('game/game.html')\n",
"step-4": "from flask import Blueprint, render_template, request, session, url_for, redirect\nfrom flask_socketio import join_room, leave_room, send, emit\nfrom models.game.game import Game\nfrom models.games.games import Games\nfrom decorators.req_login import requires_login\ngame_blueprint = Blueprint('game', __name__)\n\n\n@game_blueprint.route('/<string:game_id>')\n@requires_login\ndef game_index(game_id):\n return render_template('game/game.html')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
r = requests.get(url)
r.raise_for_status()
print(r.encoding)
r.encoding = r.apparent_encoding
print(r.text[:1000])
print(r.apparent_encoding)
except:
print('error')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'https://item.jd.com/100008348550.html'
try:
r = requests.get(url)
r.raise_for_status()
print(r.encoding)
r.encoding = r.apparent_encoding
print(r.text[:1000])
print(r.apparent_encoding)
except:
print('error')
<|reserved_special_token_1|>
import requests
url = 'https://item.jd.com/100008348550.html'
try:
r = requests.get(url)
r.raise_for_status()
print(r.encoding)
r.encoding = r.apparent_encoding
print(r.text[:1000])
print(r.apparent_encoding)
except:
print('error')
|
flexible
|
{
"blob_id": "0271c45a21047b948946dd76f147692bb16b8bcf",
"index": 5378,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n r = requests.get(url)\n r.raise_for_status()\n print(r.encoding)\n r.encoding = r.apparent_encoding\n print(r.text[:1000])\n print(r.apparent_encoding)\nexcept:\n print('error')\n",
"step-3": "<mask token>\nurl = 'https://item.jd.com/100008348550.html'\ntry:\n r = requests.get(url)\n r.raise_for_status()\n print(r.encoding)\n r.encoding = r.apparent_encoding\n print(r.text[:1000])\n print(r.apparent_encoding)\nexcept:\n print('error')\n",
"step-4": "import requests\nurl = 'https://item.jd.com/100008348550.html'\ntry:\n r = requests.get(url)\n r.raise_for_status()\n print(r.encoding)\n r.encoding = r.apparent_encoding\n print(r.text[:1000])\n print(r.apparent_encoding)\nexcept:\n print('error')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class URL(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class URL(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.label
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class URL(models.Model):
label = models.CharField(null=True, blank=True, max_length=30)
address = models.URLField()
slug = models.SlugField(unique=True, max_length=8)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.label
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
class URL(models.Model):
label = models.CharField(null=True, blank=True, max_length=30)
address = models.URLField()
slug = models.SlugField(unique=True, max_length=8)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.label
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
# Create your models here.
class URL(models.Model):
label = models.CharField(null=True, blank=True, max_length=30)
address = models.URLField()
slug = models.SlugField(unique=True, max_length=8)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.label
|
flexible
|
{
"blob_id": "2dcb02ea2f36dd31eda13c1d666201f861c117e7",
"index": 4027,
"step-1": "<mask token>\n\n\nclass URL(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass URL(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.label\n",
"step-3": "<mask token>\n\n\nclass URL(models.Model):\n label = models.CharField(null=True, blank=True, max_length=30)\n address = models.URLField()\n slug = models.SlugField(unique=True, max_length=8)\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.label\n",
"step-4": "from django.db import models\nfrom django.utils import timezone\n\n\nclass URL(models.Model):\n label = models.CharField(null=True, blank=True, max_length=30)\n address = models.URLField()\n slug = models.SlugField(unique=True, max_length=8)\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.label\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\nclass URL(models.Model):\n label = models.CharField(null=True, blank=True, max_length=30)\n address = models.URLField()\n slug = models.SlugField(unique=True, max_length=8)\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.label",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def work1(num):
global g_num
for i in range(num):
g_num += 1
print('__in work1: g_num is {}'.format(g_num))
def work2(num):
global g_num
for i in range(num):
g_num += 1
print('__in work2: g_num is {}'.format(g_num))
def main():
print('__线程创建之前g_num is {}'.format(g_num))
num = 10000000000
t1 = threading.Thread(target=work1, args=(num,))
t1.start()
t2 = threading.Thread(target=work2, args=(num,))
t2.start()
while len(threading.enumerate()) != 1:
time.sleep(1)
print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def work1(num):
global g_num
for i in range(num):
g_num += 1
print('__in work1: g_num is {}'.format(g_num))
def work2(num):
global g_num
for i in range(num):
g_num += 1
print('__in work2: g_num is {}'.format(g_num))
def main():
print('__线程创建之前g_num is {}'.format(g_num))
num = 10000000000
t1 = threading.Thread(target=work1, args=(num,))
t1.start()
t2 = threading.Thread(target=work2, args=(num,))
t2.start()
while len(threading.enumerate()) != 1:
time.sleep(1)
print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
g_num = 0
def work1(num):
global g_num
for i in range(num):
g_num += 1
print('__in work1: g_num is {}'.format(g_num))
def work2(num):
global g_num
for i in range(num):
g_num += 1
print('__in work2: g_num is {}'.format(g_num))
def main():
print('__线程创建之前g_num is {}'.format(g_num))
num = 10000000000
t1 = threading.Thread(target=work1, args=(num,))
t1.start()
t2 = threading.Thread(target=work2, args=(num,))
t2.start()
while len(threading.enumerate()) != 1:
time.sleep(1)
print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import threading
import time
g_num = 0
def work1(num):
global g_num
for i in range(num):
g_num += 1
print('__in work1: g_num is {}'.format(g_num))
def work2(num):
global g_num
for i in range(num):
g_num += 1
print('__in work2: g_num is {}'.format(g_num))
def main():
print('__线程创建之前g_num is {}'.format(g_num))
num = 10000000000
t1 = threading.Thread(target=work1, args=(num,))
t1.start()
t2 = threading.Thread(target=work2, args=(num,))
t2.start()
while len(threading.enumerate()) != 1:
time.sleep(1)
print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import threading
import time
g_num = 0
def work1(num):
global g_num
for i in range(num):
g_num += 1
print("__in work1: g_num is {}".format(g_num))
def work2(num):
global g_num
for i in range(num):
g_num += 1
print("__in work2: g_num is {}".format(g_num))
def main():
print("__线程创建之前g_num is {}".format(g_num))
# num = 100 or 10000000
num = 10000000000
t1 = threading.Thread(target=work1, args=(num,))
t1.start()
t2 = threading.Thread(target=work2, args=(num,))
t2.start()
while len(threading.enumerate()) != 1:
time.sleep(1)
print("2个线程对同一个全局变量操作之后的最终结果是:{}".format(g_num))
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "60079005c2091d2dc0b76fb71739671873f0e0f1",
"index": 9534,
"step-1": "<mask token>\n\n\ndef work1(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work1: g_num is {}'.format(g_num))\n\n\ndef work2(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work2: g_num is {}'.format(g_num))\n\n\ndef main():\n print('__线程创建之前g_num is {}'.format(g_num))\n num = 10000000000\n t1 = threading.Thread(target=work1, args=(num,))\n t1.start()\n t2 = threading.Thread(target=work2, args=(num,))\n t2.start()\n while len(threading.enumerate()) != 1:\n time.sleep(1)\n print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef work1(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work1: g_num is {}'.format(g_num))\n\n\ndef work2(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work2: g_num is {}'.format(g_num))\n\n\ndef main():\n print('__线程创建之前g_num is {}'.format(g_num))\n num = 10000000000\n t1 = threading.Thread(target=work1, args=(num,))\n t1.start()\n t2 = threading.Thread(target=work2, args=(num,))\n t2.start()\n while len(threading.enumerate()) != 1:\n time.sleep(1)\n print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\ng_num = 0\n\n\ndef work1(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work1: g_num is {}'.format(g_num))\n\n\ndef work2(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work2: g_num is {}'.format(g_num))\n\n\ndef main():\n print('__线程创建之前g_num is {}'.format(g_num))\n num = 10000000000\n t1 = threading.Thread(target=work1, args=(num,))\n t1.start()\n t2 = threading.Thread(target=work2, args=(num,))\n t2.start()\n while len(threading.enumerate()) != 1:\n time.sleep(1)\n print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import threading\nimport time\ng_num = 0\n\n\ndef work1(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work1: g_num is {}'.format(g_num))\n\n\ndef work2(num):\n global g_num\n for i in range(num):\n g_num += 1\n print('__in work2: g_num is {}'.format(g_num))\n\n\ndef main():\n print('__线程创建之前g_num is {}'.format(g_num))\n num = 10000000000\n t1 = threading.Thread(target=work1, args=(num,))\n t1.start()\n t2 = threading.Thread(target=work2, args=(num,))\n t2.start()\n while len(threading.enumerate()) != 1:\n time.sleep(1)\n print('2个线程对同一个全局变量操作之后的最终结果是:{}'.format(g_num))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import threading\nimport time\n\n\ng_num = 0\n\n\ndef work1(num):\n global g_num\n for i in range(num):\n g_num += 1\n print(\"__in work1: g_num is {}\".format(g_num))\n\n\ndef work2(num):\n global g_num\n for i in range(num):\n g_num += 1 \n print(\"__in work2: g_num is {}\".format(g_num))\n\n\ndef main():\n print(\"__线程创建之前g_num is {}\".format(g_num))\n # num = 100 or 10000000\n num = 10000000000\n t1 = threading.Thread(target=work1, args=(num,))\n t1.start()\n t2 = threading.Thread(target=work2, args=(num,))\n t2.start()\n\n while len(threading.enumerate()) != 1:\n time.sleep(1)\n \n print(\"2个线程对同一个全局变量操作之后的最终结果是:{}\".format(g_num))\n \n \nif __name__ == \"__main__\":\n main()\n \n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def repeatedNTimes(self, A):
freq = {}
for i in A:
if i in freq.keys():
freq[i] += 1
else:
freq[i] = 1
key = list(freq.keys())
val = list(freq.values())
m = max(val)
return key[val.index(m)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def repeatedNTimes(self, A):
freq = {}
for i in A:
if i in freq.keys():
freq[i] += 1
else:
freq[i] = 1
key = list(freq.keys())
val = list(freq.values())
m = max(val)
return key[val.index(m)]
<|reserved_special_token_0|>
print(s.repeatedNTimes(l))
<|reserved_special_token_1|>
class Solution:
def repeatedNTimes(self, A):
freq = {}
for i in A:
if i in freq.keys():
freq[i] += 1
else:
freq[i] = 1
key = list(freq.keys())
val = list(freq.values())
m = max(val)
return key[val.index(m)]
s = Solution()
l = [2, 1, 2, 5, 3, 2]
k = [1, 1, 1, 2]
print(s.repeatedNTimes(l))
<|reserved_special_token_1|>
#n-repeated element
class Solution:
def repeatedNTimes(self, A):
freq = {}
for i in A:
if i in freq.keys():
freq[i] += 1
else:
freq[i] = 1
key = list(freq.keys())
val = list(freq.values())
m = max(val)
return key[val.index(m)]
s = Solution()
l = [2,1,2,5,3,2]
k = [1,1,1,2]
print(s.repeatedNTimes(l))
|
flexible
|
{
"blob_id": "d50618f7784e69b46cb665ec1a9c56f7a2867785",
"index": 5033,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\n\n<mask token>\nprint(s.repeatedNTimes(l))\n",
"step-4": "class Solution:\n\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\n\ns = Solution()\nl = [2, 1, 2, 5, 3, 2]\nk = [1, 1, 1, 2]\nprint(s.repeatedNTimes(l))\n",
"step-5": "#n-repeated element\nclass Solution:\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\ns = Solution()\nl = [2,1,2,5,3,2]\nk = [1,1,1,2]\nprint(s.repeatedNTimes(l))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import aiohttp
import asyncio
import base64
import discord
import json
from discord.ext import commands
class BasicMC(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name="stealskin", aliases=["skinsteal", "skin"])
@commands.cooldown(1, 4, commands.BucketType.user)
async def skinner(self, ctx, gamertag: str):
response = await self.session.get(f"https://api.mojang.com/users/profiles/minecraft/{gamertag}")
if response.status == 204:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
uuid = json.loads(await response.text()).get("id")
if uuid is None:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
response = await self.session.get(
f"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false")
content = json.loads(await response.text())
if "error" in content:
if content["error"] == "TooManyRequestsException":
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="Oops, we're being ratelimited by the Mojang API, try again later!"))
return
if len(content["properties"]) == 0:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="We can't get this person's skin for some reason..."))
return
undec = base64.b64decode(content["properties"][0]["value"])
try:
url = json.loads(undec)["textures"]["SKIN"]["url"]
except Exception:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="An error occurred while fetching that skin!"))
return
skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=f"{gamertag}'s skin\n[**[Download]**]({url})")
skin_embed.set_thumbnail(url=url)
skin_embed.set_image(url=f"https://mc-heads.net/body/{gamertag}")
await ctx.send(embed=skin_embed)
@commands.command(name="nametouuid", aliases=["uuid", "getuuid"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_uuid(self, ctx, gamertag: str):
r = await self.session.post("https://api.mojang.com/profiles/minecraft", json=[gamertag])
j = json.loads(await r.text()) # [0]['id']
if not j:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="That user could not be found."))
return
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{gamertag}: ``{j[0]['id']}``"))
@commands.command(name="uuidtoname", aliases=["getgamertag"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_gamertag(self, ctx, uuid: str):
response = await self.session.get(f"https://api.mojang.com/user/profiles/{uuid}/names")
if response.status == 204:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
j = json.loads(await response.text())
name = j[len(j) - 1]["name"]
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{uuid}: ``{name}``"))
@commands.command(name="colorcodes", aliases=["mccolorcodes", "colors", "cc"])
async def mc_color_codes(self, ctx):
embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description="Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.")
embed.set_author(name="Minecraft Formatting Codes")
embed.add_field(name="Color Codes", value="<:red:697541699706028083> **Red** ``§c``\n"
"<:yellow:697541699743776808> **Yellow** ``§e``\n"
"<:green:697541699316219967> **Green** ``§a``\n"
"<:aqua:697541699173613750> **Aqua** ``§b``\n"
"<:blue:697541699655696787> **Blue** ``§9``\n"
"<:light_purple:697541699546775612> **Light Purple** ``§d``\n"
"<:white:697541699785719838> **White** ``§f``\n"
"<:gray:697541699534061630> **Gray** ``§7``\n")
embed.add_field(name="Color Codes", value="<:dark_red:697541699488055426> **Dark Red** ``§4``\n"
"<:gold:697541699639050382> **Gold** ``§6``\n"
"<:dark_green:697541699500769420> **Dark Green** ``§2``\n"
"<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n"
"<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n"
"<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n"
"<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n"
"<:black:697541699496444025> **Black** ``§0``\n")
embed.add_field(name="Formatting Codes", value="<:bold:697541699488186419> **Bold** ``§l``\n"
"<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n"
"<:underline:697541699806953583> __Underline__ ``§n``\n"
"<:italic:697541699152379995> *Italic* ``§o``\n"
"<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n"
"<:reset:697541699697639446> Reset ``§r``\n")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(BasicMC(bot))
|
normal
|
{
"blob_id": "a6f242a0443ffbad835f86098b70ede41c03515b",
"index": 7652,
"step-1": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n <mask token>\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** 
``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n 
embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n 
embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-4": "import aiohttp\nimport asyncio\nimport base64\nimport discord\nimport json\nfrom discord.ext import commands\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section 
(``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-5": "import aiohttp\nimport asyncio\nimport base64\nimport discord\nimport json\nfrom discord.ext import commands\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n self.session = aiohttp.ClientSession()\n\n @commands.command(name=\"stealskin\", aliases=[\"skinsteal\", \"skin\"])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(f\"https://api.mojang.com/users/profiles/minecraft/{gamertag}\")\n if response.status == 204:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get(\"id\")\n if uuid is None:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f\"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false\")\n content = json.loads(await response.text())\n if \"error\" in content:\n if content[\"error\"] == \"TooManyRequestsException\":\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"Oops, we're being ratelimited by the Mojang API, try again later!\"))\n return\n if len(content[\"properties\"]) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content[\"properties\"][0][\"value\"])\n try:\n url = json.loads(undec)[\"textures\"][\"SKIN\"][\"url\"]\n except Exception:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"An error occurred while fetching that skin!\"))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"{gamertag}'s skin\\n[**[Download]**]({url})\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f\"https://mc-heads.net/body/{gamertag}\")\n await ctx.send(embed=skin_embed)\n\n @commands.command(name=\"nametouuid\", aliases=[\"uuid\", \"getuuid\"])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post(\"https://api.mojang.com/profiles/minecraft\", json=[gamertag])\n j = json.loads(await r.text()) # [0]['id']\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"That user could not be found.\"))\n return\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name=\"uuidtoname\", aliases=[\"getgamertag\"])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(f\"https://api.mojang.com/user/profiles/{uuid}/names\")\n if response.status == 204:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1][\"name\"]\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f\"{uuid}: ``{name}``\"))\n\n @commands.command(name=\"colorcodes\", aliases=[\"mccolorcodes\", \"colors\", \"cc\"])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"Text in Minecraft can be formatted using 
different codes and\\nthe section (``§``) sign.\")\n embed.set_author(name=\"Minecraft Formatting Codes\")\n embed.add_field(name=\"Color Codes\", value=\"<:red:697541699706028083> **Red** ``§c``\\n\"\n \"<:yellow:697541699743776808> **Yellow** ``§e``\\n\"\n \"<:green:697541699316219967> **Green** ``§a``\\n\"\n \"<:aqua:697541699173613750> **Aqua** ``§b``\\n\"\n \"<:blue:697541699655696787> **Blue** ``§9``\\n\"\n \"<:light_purple:697541699546775612> **Light Purple** ``§d``\\n\"\n \"<:white:697541699785719838> **White** ``§f``\\n\"\n \"<:gray:697541699534061630> **Gray** ``§7``\\n\")\n embed.add_field(name=\"Color Codes\", value=\"<:dark_red:697541699488055426> **Dark Red** ``§4``\\n\"\n \"<:gold:697541699639050382> **Gold** ``§6``\\n\"\n \"<:dark_green:697541699500769420> **Dark Green** ``§2``\\n\"\n \"<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\\n\"\n \"<:dark_blue:697541699488055437> **Dark Blue** ``§1``\\n\"\n \"<:dark_purple:697541699437592666> **Dark Purple** ``§5``\\n\"\n \"<:dark_gray:697541699471278120> **Dark Gray** ``§8``\\n\"\n \"<:black:697541699496444025> **Black** ``§0``\\n\")\n embed.add_field(name=\"Formatting Codes\", value=\"<:bold:697541699488186419> **Bold** ``§l``\\n\"\n \"<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\\n\"\n \"<:underline:697541699806953583> __Underline__ ``§n``\\n\"\n \"<:italic:697541699152379995> *Italic* ``§o``\\n\"\n \"<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\\n\"\n \"<:reset:697541699697639446> Reset ``§r``\\n\")\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.apps import AppConfig
class MarketingemailsConfig(AppConfig):
name = 'marketingemails'
|
normal
|
{
"blob_id": "19bb58ab440ca00bf6410a70a8b6bbc24eec96c1",
"index": 492,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MarketingemailsConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MarketingemailsConfig(AppConfig):\n name = 'marketingemails'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass MarketingemailsConfig(AppConfig):\n name = 'marketingemails'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from modeller import *
from modeller.automodel import *
# This part was within the script loop_modelling_2
# Here it is in a separate file for loop_modelling_3 so the script can be run in parallel
class MyLoop(dopehr_loopmodel):
def select_atoms(self):
# Here only the second loop atoms are allowed to move so we do not mess with the first loop we have previously refined
return selection(self.residue_range('218:', '231:'))
def select_loop_atoms(self):
return selection(self.residue_range('218:', '231:'))
|
normal
|
{
"blob_id": "d058c3df8513e07e4ff7035aa5c5885819e43687",
"index": 7295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyLoop(dopehr_loopmodel):\n <mask token>\n\n def select_loop_atoms(self):\n return selection(self.residue_range('218:', '231:'))\n",
"step-3": "<mask token>\n\n\nclass MyLoop(dopehr_loopmodel):\n\n def select_atoms(self):\n return selection(self.residue_range('218:', '231:'))\n\n def select_loop_atoms(self):\n return selection(self.residue_range('218:', '231:'))\n",
"step-4": "from modeller import *\nfrom modeller.automodel import *\n\n\nclass MyLoop(dopehr_loopmodel):\n\n def select_atoms(self):\n return selection(self.residue_range('218:', '231:'))\n\n def select_loop_atoms(self):\n return selection(self.residue_range('218:', '231:'))\n",
"step-5": "from modeller import *\nfrom modeller.automodel import * \n\n# This part was within the script loop_modelling_2\n# Here is is in a separate file for loop_modelling_3 so the script can be run in parallel\n\nclass MyLoop(dopehr_loopmodel):\n def select_atoms(self):\n\t # Here only the second loop atoms are allowed to move so we do not mess with the first loop we have previously refined\n return selection(self.residue_range('218:', '231:'))\n def select_loop_atoms(self):\n return selection(self.residue_range('218:', '231:'))\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from sympy import *
import sys
x = Symbol("x")
# EOF
try:
in_str = input()
except Exception as e:
print("WRONG FORMAT!") # Wrong Format!
sys.exit(0)
in_str = in_str.replace("^", "**") #change '^'into'**' for recognition
# wrong expression
try:
in_exp = eval(in_str) # turn str into expression
except Exception as e:
print("WRONG FORMAT!") # Wrong Format!
sys.exit(0)
res = diff(in_exp)
print(str(res).replace("**", "^"))
#res = diff(in_exp).subs(x,2)
#print(res)
|
normal
|
{
"blob_id": "1634ae0e329b4f277fa96a870fbd19626c0ece81",
"index": 6516,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n in_str = input()\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\n<mask token>\ntry:\n in_exp = eval(in_str)\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\n<mask token>\nprint(str(res).replace('**', '^'))\n",
"step-3": "<mask token>\nx = Symbol('x')\ntry:\n in_str = input()\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nin_str = in_str.replace('^', '**')\ntry:\n in_exp = eval(in_str)\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nres = diff(in_exp)\nprint(str(res).replace('**', '^'))\n",
"step-4": "from sympy import *\nimport sys\nx = Symbol('x')\ntry:\n in_str = input()\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nin_str = in_str.replace('^', '**')\ntry:\n in_exp = eval(in_str)\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nres = diff(in_exp)\nprint(str(res).replace('**', '^'))\n",
"step-5": "from sympy import *\nimport sys\nx = Symbol(\"x\")\n# EOF\ntry:\n in_str = input()\nexcept Exception as e:\n print(\"WRONG FORMAT!\") # Wrong Format!\n sys.exit(0)\n\nin_str = in_str.replace(\"^\", \"**\") #change '^'into'**' for recognition\n\n# wrong expression\ntry:\n in_exp = eval(in_str) # turn str into expression\nexcept Exception as e:\n print(\"WRONG FORMAT!\") # Wrong Format!\n sys.exit(0)\n\nres = diff(in_exp)\nprint(str(res).replace(\"**\", \"^\"))\n#res = diff(in_exp).subs(x,2)\n#print(res)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Time :O(N) space: O(1)
def swap(arr, start, end):
while start < end:
arr[start], arr[end] = arr[end], arr[start]
start += 1
end -= 1
def rotation(arr, k, n):
k = k % n
swap(arr, 0, k-1)
print(arr)
swap(arr, k, n-1)
print(arr)
swap(arr, 0, n-1)
print(arr)
if __name__ == '__main__':
arr = [1, 2, 3, 4, 5, 6, 7]
n = len(arr)
k = 4
rotation(arr, k, n)
|
normal
|
{
"blob_id": "2180146da7ea745f5917ee66fd8c467437b5af4c",
"index": 6761,
"step-1": "<mask token>\n",
"step-2": "def swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\n\n<mask token>\n",
"step-3": "def swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\n\ndef rotation(arr, k, n):\n k = k % n\n swap(arr, 0, k - 1)\n print(arr)\n swap(arr, k, n - 1)\n print(arr)\n swap(arr, 0, n - 1)\n print(arr)\n\n\n<mask token>\n",
"step-4": "def swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\n\ndef rotation(arr, k, n):\n k = k % n\n swap(arr, 0, k - 1)\n print(arr)\n swap(arr, k, n - 1)\n print(arr)\n swap(arr, 0, n - 1)\n print(arr)\n\n\nif __name__ == '__main__':\n arr = [1, 2, 3, 4, 5, 6, 7]\n n = len(arr)\n k = 4\n rotation(arr, k, n)\n",
"step-5": "# Time :O(N) space: O(1)\ndef swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\ndef rotation(arr, k, n):\n k = k % n\n swap(arr, 0, k-1)\n print(arr)\n swap(arr, k, n-1)\n print(arr)\n swap(arr, 0, n-1)\n print(arr)\n\nif __name__ == '__main__':\n arr = [1, 2, 3, 4, 5, 6, 7]\n n = len(arr)\n k = 4\n rotation(arr, k, n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.2.6 on 2020-05-27 19:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pancar', '0006_auto_20200526_1058'),
]
operations = [
migrations.AlterField(
model_name='process',
name='price',
field=models.DecimalField(decimal_places=1, max_digits=5, null=True),
),
]
|
normal
|
{
"blob_id": "316a34bbc2b3e3c818ef837f51bc1f86863ea59a",
"index": 2473,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('pancar', '0006_auto_20200526_1058')]\n operations = [migrations.AlterField(model_name='process', name='price',\n field=models.DecimalField(decimal_places=1, max_digits=5, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('pancar', '0006_auto_20200526_1058')]\n operations = [migrations.AlterField(model_name='process', name='price',\n field=models.DecimalField(decimal_places=1, max_digits=5, null=True))]\n",
"step-5": "# Generated by Django 2.2.6 on 2020-05-27 19:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pancar', '0006_auto_20200526_1058'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='process',\n name='price',\n field=models.DecimalField(decimal_places=1, max_digits=5, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MonthYearWidget(DateWidget):
""" Month and year widget """
zope.interface.implementsOnly(IMonthYearWidget)
klass = u'monthyear-widget'
value = '', '', 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MonthYearWidget(DateWidget):
""" Month and year widget """
zope.interface.implementsOnly(IMonthYearWidget)
klass = u'monthyear-widget'
value = '', '', 1
@zope.component.adapter(zope.schema.interfaces.IField, z3c.form.interfaces.
IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def MonthYearFieldWidget(field, request):
"""IFieldWidget factory for MonthYearWidget."""
return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))
<|reserved_special_token_1|>
__docformat__ = 'reStructuredText'
<|reserved_special_token_0|>
class MonthYearWidget(DateWidget):
""" Month and year widget """
zope.interface.implementsOnly(IMonthYearWidget)
klass = u'monthyear-widget'
value = '', '', 1
@zope.component.adapter(zope.schema.interfaces.IField, z3c.form.interfaces.
IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def MonthYearFieldWidget(field, request):
"""IFieldWidget factory for MonthYearWidget."""
return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))
<|reserved_special_token_1|>
__docformat__ = 'reStructuredText'
import z3c.form
import zope.schema
import zope.interface
import zope.component
from widget_date import DateWidget
from interfaces import IMonthYearWidget
class MonthYearWidget(DateWidget):
""" Month and year widget """
zope.interface.implementsOnly(IMonthYearWidget)
klass = u'monthyear-widget'
value = '', '', 1
@zope.component.adapter(zope.schema.interfaces.IField, z3c.form.interfaces.
IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def MonthYearFieldWidget(field, request):
"""IFieldWidget factory for MonthYearWidget."""
return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))
<|reserved_special_token_1|>
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2008 Rok Garbas <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
__docformat__ = "reStructuredText"
import z3c.form
import zope.schema
import zope.interface
import zope.component
from widget_date import DateWidget
from interfaces import IMonthYearWidget
class MonthYearWidget(DateWidget):
""" Month and year widget """
zope.interface.implementsOnly(IMonthYearWidget)
klass = u'monthyear-widget'
value = ('', '', 1)
@zope.component.adapter(zope.schema.interfaces.IField, z3c.form.interfaces.IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def MonthYearFieldWidget(field, request):
"""IFieldWidget factory for MonthYearWidget."""
return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))
|
flexible
|
{
"blob_id": "d0f9dd0a06023dd844b0bf70dff360f6bb46c152",
"index": 4412,
"step-1": "<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-3": "__docformat__ = 'reStructuredText'\n<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-4": "__docformat__ = 'reStructuredText'\nimport z3c.form\nimport zope.schema\nimport zope.interface\nimport zope.component\nfrom widget_date import DateWidget\nfrom interfaces import IMonthYearWidget\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-5": "#-*- coding: utf-8 -*- \n\n#############################################################################\n# #\n# Copyright (c) 2008 Rok Garbas <[email protected]> #\n# #\n# This program is free software; you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation; either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see <http://www.gnu.org/licenses/>. #\n# #\n#############################################################################\n__docformat__ = \"reStructuredText\"\n\nimport z3c.form\nimport zope.schema\nimport zope.interface\nimport zope.component\nfrom widget_date import DateWidget\nfrom interfaces import IMonthYearWidget\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n\n zope.interface.implementsOnly(IMonthYearWidget)\n\n klass = u'monthyear-widget'\n value = ('', '', 1)\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
x = '我是一个字符串'
y = "我也是一个字符串"
z = """我还是一个字符串"""
#字符串str用单引号(' ')或双引号(" ")括起来
#使用反斜杠(\)转义特殊字符。
s = 'Yes,he doesn\'t'
#如果你不想让反斜杠发生转义,
#可以在字符串前面添加一个r,表示原始字符串
print('C:\some\name')
print('C:\\some\\name')
print(r'C:\some\name')
#反斜杠可以作为续行符,表示下一行是上一行的延续。
s = "abcd\
efg"
print(s)
#还可以使用"""..."""或者'''...'''跨越多行
s = """
Hello I am fine!
Thinks.
"""
print(s)
|
normal
|
{
"blob_id": "8fe9d21bb65b795a6633ab390f7f5d24a90146d5",
"index": 6774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('C:\\\\some\\name')\nprint('C:\\\\some\\\\name')\nprint('C:\\\\some\\\\name')\n<mask token>\nprint(s)\n<mask token>\nprint(s)\n",
"step-3": "x = '我是一个字符串'\ny = '我也是一个字符串'\nz = '我还是一个字符串'\ns = \"Yes,he doesn't\"\nprint('C:\\\\some\\name')\nprint('C:\\\\some\\\\name')\nprint('C:\\\\some\\\\name')\ns = 'abcdefg'\nprint(s)\ns = \"\"\"\nHello I am fine!\nThinks.\n\"\"\"\nprint(s)\n",
"step-4": "x = '我是一个字符串'\r\ny = \"我也是一个字符串\"\r\nz = \"\"\"我还是一个字符串\"\"\"\r\n\r\n\r\n#字符串str用单引号(' ')或双引号(\" \")括起来\r\n\r\n#使用反斜杠(\\)转义特殊字符。\r\ns = 'Yes,he doesn\\'t'\r\n\r\n#如果你不想让反斜杠发生转义,\r\n#可以在字符串前面添加一个r,表示原始字符串\r\nprint('C:\\some\\name')\r\n\r\nprint('C:\\\\some\\\\name')\r\n\r\nprint(r'C:\\some\\name')\r\n\r\n#反斜杠可以作为续行符,表示下一行是上一行的延续。\r\ns = \"abcd\\\r\nefg\"\r\nprint(s)\r\n\r\n#还可以使用\"\"\"...\"\"\"或者'''...'''跨越多行\r\n\r\ns = \"\"\"\r\nHello I am fine!\r\nThinks.\r\n\"\"\"\r\nprint(s)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
"""Diverse wiskundige structuren weergeven in LaTeX in Jupyter Notebook."""
__author__ = "Brian van der Bijl"
__copyright__ = "Copyright 2020, Hogeschool Utrecht"
from IPython.display import display, Math, Markdown
import re
def show_num(x):
return re.compile(r"\.(?!\d)").sub("\1",x)
def latex_formula(form):
latex = form.simplify().to_latex(outer=True)
if latex:
display(Math(latex))
display(Markdown("<details><pre>$" + latex + "$</pre></details>"))
def latex_bmatrix(M, label=None): # Gebaseerd op https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix
if len(M.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(M).replace("[", "").replace("]", "").splitlines()
if label:
result = [label + " = "]
else:
result = [""]
result += [r"\begin{bmatrix}"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\" for l in lines]
result += [r"\end{bmatrix}"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_amatrix(M, labels=None):
if len(M.shape) > 2:
raise ValueError('array can at most display two dimensions')
lines = str(M).replace("[", "").replace("]", "").splitlines()
if labels and len(labels) == 2:
result = [r"(\mathbf{" + labels[0] + r"} | \vec " + labels[1] + ") = "]
else:
result = [""]
result += [r"\left[\begin{array}{ccc|c}"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\" for l in lines]
result += [r"\end{array}\right]"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_msquare(sq):
if sq.shape != (3,3):
raise ValueError('Geen magisch vierkant')
lines = str(sq).replace("[", "").replace("]", "").splitlines()
result = [r"\begin{array}{|c|c|c|}\hline"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\\hline" for l in lines]
result += [r"\end{array}"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_ratio(x):
"""Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string
geconverteerd."""
if isinstance(x, int):
return str(x)
else:
n, d = x.as_integer_ratio() # Nul buiten de breuk halen
return ("-" if n < 0 else "") + r"\frac{" + str(abs(n)) + "}{" + str(d) + "}"
def latex_polynomial(poly):
terms, label, var, primes = poly # Bind parameters uit tuple
def power(exp):
"""Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.
In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd."""
if exp is 1:
return var
elif exp is 0:
return ""
else:
return (var + r"^{" + latex_ratio(exp) + "}")
# Print f(x) met het juiste aantal primes
result = label + ("^{" + r"\prime"*primes + "}" if primes > 0 else "") + "(" + var + ") = "
first = True # Na de eerste moet er "+" tussen de termen komen
for k, v in reversed(sorted(terms.items())): # Voor iedere term, van groot (hoog exponent) naar klein
if v > 0 and not first: # Koppel met een plus, tenzij het de eerste term is
result += "+"
elif v < 0: # Koppel met een min als de term negatief is, ook de eerste term
result += "-"
if v != 0: # Zet first op False na de eerste keer
first = False
if k is 0:
result += str(v)
elif abs(v) is 1: # Print x in plaats van 1x en -x in plaats van -1x
result += str(power(k))
elif v != 0: # Print iedere term die niet 0 of 1 is op de gebruikelijke manier, zonder min want die staat
result += latex_ratio(abs(v)) + str(power(k)) # erboven al
display(Math(result))
display(Markdown("<details><pre>$" + result + "$</pre></details>"))
|
normal
|
{
"blob_id": "7f7bd2e9ec1932ccfd8aa900956ce85473ee8dbd",
"index": 4668,
"step-1": "<mask token>\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\n<mask token>\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-3": "<mask token>\n\n\ndef show_num(x):\n return re.compile('\\\\.(?!\\\\d)').sub('\\x01', x)\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\ndef latex_bmatrix(M, label=None):\n if len(M.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if label:\n result = [label + ' = ']\n else:\n result = ['']\n result += ['\\\\begin{bmatrix}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{bmatrix}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-4": "<mask token>\n__author__ = 'Brian van der Bijl'\n__copyright__ = 'Copyright 2020, Hogeschool Utrecht'\nfrom IPython.display import display, Math, Markdown\nimport re\n\n\ndef show_num(x):\n return re.compile('\\\\.(?!\\\\d)').sub('\\x01', x)\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\ndef latex_bmatrix(M, label=None):\n if len(M.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if label:\n result = [label + ' = ']\n else:\n result = ['']\n result += ['\\\\begin{bmatrix}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{bmatrix}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-5": "#!/usr/bin/env python\r\n\r\n\"\"\"Diverse wiskundige structuren weergeven in LaTeX in Jupyter Notebook.\"\"\"\r\n\r\n__author__ = \"Brian van der Bijl\"\r\n__copyright__ = \"Copyright 2020, Hogeschool Utrecht\"\r\n\r\nfrom IPython.display import display, Math, Markdown\r\nimport re\r\n\r\ndef show_num(x):\r\n return re.compile(r\"\\.(?!\\d)\").sub(\"\\1\",x)\r\n\r\ndef latex_formula(form):\r\n latex = form.simplify().to_latex(outer=True)\r\n if latex:\r\n display(Math(latex))\r\n display(Markdown(\"<details><pre>$\" + latex + \"$</pre></details>\"))\r\n\r\ndef latex_bmatrix(M, label=None): # Gebaseerd op https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix\r\n if len(M.shape) > 2:\r\n raise ValueError('bmatrix can at most display two dimensions')\r\n lines = str(M).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n if label:\r\n result = [label + \" = \"]\r\n else:\r\n result = [\"\"]\r\n result += [r\"\\begin{bmatrix}\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\" for l in lines]\r\n result += [r\"\\end{bmatrix}\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_amatrix(M, labels=None):\r\n if len(M.shape) > 2:\r\n raise ValueError('array can at most display two dimensions')\r\n lines = str(M).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n if labels and len(labels) == 2:\r\n result = [r\"(\\mathbf{\" + labels[0] + r\"} | \\vec \" + labels[1] + \") = \"]\r\n else:\r\n result = [\"\"]\r\n result += [r\"\\left[\\begin{array}{ccc|c}\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\" for l in lines]\r\n result += [r\"\\end{array}\\right]\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_msquare(sq):\r\n if sq.shape != (3,3):\r\n raise ValueError('Geen magisch vierkant')\r\n lines = str(sq).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n result = [r\"\\begin{array}{|c|c|c|}\\hline\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\\hline\" for l in lines]\r\n result += [r\"\\end{array}\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_ratio(x):\r\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\r\n geconverteerd.\"\"\"\r\n if isinstance(x, int):\r\n return str(x)\r\n else:\r\n n, d = x.as_integer_ratio() # Nul buiten de breuk halen\r\n return (\"-\" if n < 0 else \"\") + r\"\\frac{\" + str(abs(n)) + \"}{\" + str(d) + \"}\"\r\n\r\ndef latex_polynomial(poly):\r\n terms, label, var, primes = poly # Bind parameters uit tuple\r\n\r\n def power(exp):\r\n \"\"\"Print een term (e.g. x^2). 
x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\r\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\r\n if exp is 1:\r\n return var\r\n elif exp is 0:\r\n return \"\"\r\n else:\r\n return (var + r\"^{\" + latex_ratio(exp) + \"}\")\r\n\r\n # Print f(x) met het juiste aantal primes \r\n result = label + (\"^{\" + r\"\\prime\"*primes + \"}\" if primes > 0 else \"\") + \"(\" + var + \") = \"\r\n first = True # Na de eerste moet er \"+\" tussen de termen komen\r\n\r\n for k, v in reversed(sorted(terms.items())): # Voor iedere term, van groot (hoog exponent) naar klein\r\n if v > 0 and not first: # Koppel met een plus, tenzij het de eerste term is\r\n result += \"+\"\r\n elif v < 0: # Koppel met een min als de term negatief is, ook de eerste term\r\n result += \"-\"\r\n\r\n if v != 0: # Zet first op False na de eerste keer\r\n first = False\r\n\r\n if k is 0:\r\n result += str(v)\r\n elif abs(v) is 1: # Print x in plaats van 1x en -x in plaats van -1x\r\n result += str(power(k))\r\n elif v != 0: # Print iedere term die niet 0 of 1 is op de gebruikelijke manier, zonder min want die staat\r\n result += latex_ratio(abs(v)) + str(power(k)) # erboven al\r\n\r\n display(Math(result))\r\n display(Markdown(\"<details><pre>$\" + result + \"$</pre></details>\"))\r\n",
"step-ids": [
1,
5,
7,
9,
10
]
}
|
[
1,
5,
7,
9,
10
] |
<|reserved_special_token_0|>
def Yes():
global player
global comp
tplayer = randint(1, 6)
tcomp = randint(1, 6)
message = ''
if tplayer > tcomp:
message = 'Wygrales!'
player += 1
elif tplayer == tcomp:
message = 'Remis'
else:
message = 'Przegrales'
comp += 1
messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +
' Komputer: ' + str(comp) + '\nTwoj rzut ' + str(tplayer) +
'\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\n' + message)
def No():
messagebox.showinfo('Do zobaczenia')
top.quit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
top.resizable(width=False, height=False)
top.geometry('200x100')
def Yes():
global player
global comp
tplayer = randint(1, 6)
tcomp = randint(1, 6)
message = ''
if tplayer > tcomp:
message = 'Wygrales!'
player += 1
elif tplayer == tcomp:
message = 'Remis'
else:
message = 'Przegrales'
comp += 1
messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +
' Komputer: ' + str(comp) + '\nTwoj rzut ' + str(tplayer) +
'\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\n' + message)
def No():
messagebox.showinfo('Do zobaczenia')
top.quit()
<|reserved_special_token_0|>
w.grid(row=0, column=0)
B1.grid(row=1, column=0)
B2.grid(row=1, column=1)
top.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tplyer = 0
tcomp = 0
player = 0
comp = 0
top = tkinter.Tk()
top.resizable(width=False, height=False)
top.geometry('200x100')
def Yes():
global player
global comp
tplayer = randint(1, 6)
tcomp = randint(1, 6)
message = ''
if tplayer > tcomp:
message = 'Wygrales!'
player += 1
elif tplayer == tcomp:
message = 'Remis'
else:
message = 'Przegrales'
comp += 1
messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +
' Komputer: ' + str(comp) + '\nTwoj rzut ' + str(tplayer) +
'\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\n' + message)
def No():
messagebox.showinfo('Do zobaczenia')
top.quit()
w = tkinter.Label(top, text='Zagramy w kosci?\n')
B1 = tkinter.Button(top, text='Tak', command=Yes, width=10)
B2 = tkinter.Button(top, text='Nie', command=No, width=10)
w.grid(row=0, column=0)
B1.grid(row=1, column=0)
B2.grid(row=1, column=1)
top.mainloop()
<|reserved_special_token_1|>
import tkinter
from tkinter import messagebox
from random import randint
tplyer = 0
tcomp = 0
player = 0
comp = 0
top = tkinter.Tk()
top.resizable(width=False, height=False)
top.geometry('200x100')
def Yes():
global player
global comp
tplayer = randint(1, 6)
tcomp = randint(1, 6)
message = ''
if tplayer > tcomp:
message = 'Wygrales!'
player += 1
elif tplayer == tcomp:
message = 'Remis'
else:
message = 'Przegrales'
comp += 1
messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +
' Komputer: ' + str(comp) + '\nTwoj rzut ' + str(tplayer) +
'\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\n' + message)
def No():
messagebox.showinfo('Do zobaczenia')
top.quit()
w = tkinter.Label(top, text='Zagramy w kosci?\n')
B1 = tkinter.Button(top, text='Tak', command=Yes, width=10)
B2 = tkinter.Button(top, text='Nie', command=No, width=10)
w.grid(row=0, column=0)
B1.grid(row=1, column=0)
B2.grid(row=1, column=1)
top.mainloop()
<|reserved_special_token_1|>
import tkinter
from tkinter import messagebox
from random import randint
tplyer = 0
tcomp = 0
player = 0
comp = 0
top = tkinter.Tk()
top.resizable(width = False, height =False)
top.geometry("200x100")
def Yes():
global player
global comp
tplayer = randint(1,6)
tcomp = randint(1,6)
message =""
if tplayer>tcomp:
message = "Wygrales!"
player+=1
elif tplayer==tcomp:
message = "Remis"
else:
message = "Przegrales"
comp +=1
messagebox.showinfo( "Wynik", "Gracz: "+str(player)+" Komputer: "+str(comp)+"\nTwoj rzut "+str(tplayer)+"\n"+"Przeciwnik wyrzucil "+str(tcomp)+"\n"+message)
def No():
messagebox.showinfo("Do zobaczenia")
top.quit()
w = tkinter.Label(top,text = "Zagramy w kosci?\n")
B1 = tkinter.Button(top, text ="Tak", command = Yes,width = 10)
B2 = tkinter.Button(top, text = "Nie", command = No,width = 10)
w.grid(row = 0,column = 0)
B1.grid(row = 1, column = 0)
B2.grid(row = 1, column = 1)
top.mainloop()
|
flexible
|
{
"blob_id": "0a5baacf17d33dbf6ea69114a8632f7fcef52c3c",
"index": 9419,
"step-1": "<mask token>\n\n\ndef Yes():\n global player\n global comp\n tplayer = randint(1, 6)\n tcomp = randint(1, 6)\n message = ''\n if tplayer > tcomp:\n message = 'Wygrales!'\n player += 1\n elif tplayer == tcomp:\n message = 'Remis'\n else:\n message = 'Przegrales'\n comp += 1\n messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +\n ' Komputer: ' + str(comp) + '\\nTwoj rzut ' + str(tplayer) +\n '\\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\\n' + message)\n\n\ndef No():\n messagebox.showinfo('Do zobaczenia')\n top.quit()\n\n\n<mask token>\n",
"step-2": "<mask token>\ntop.resizable(width=False, height=False)\ntop.geometry('200x100')\n\n\ndef Yes():\n global player\n global comp\n tplayer = randint(1, 6)\n tcomp = randint(1, 6)\n message = ''\n if tplayer > tcomp:\n message = 'Wygrales!'\n player += 1\n elif tplayer == tcomp:\n message = 'Remis'\n else:\n message = 'Przegrales'\n comp += 1\n messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +\n ' Komputer: ' + str(comp) + '\\nTwoj rzut ' + str(tplayer) +\n '\\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\\n' + message)\n\n\ndef No():\n messagebox.showinfo('Do zobaczenia')\n top.quit()\n\n\n<mask token>\nw.grid(row=0, column=0)\nB1.grid(row=1, column=0)\nB2.grid(row=1, column=1)\ntop.mainloop()\n",
"step-3": "<mask token>\ntplyer = 0\ntcomp = 0\nplayer = 0\ncomp = 0\ntop = tkinter.Tk()\ntop.resizable(width=False, height=False)\ntop.geometry('200x100')\n\n\ndef Yes():\n global player\n global comp\n tplayer = randint(1, 6)\n tcomp = randint(1, 6)\n message = ''\n if tplayer > tcomp:\n message = 'Wygrales!'\n player += 1\n elif tplayer == tcomp:\n message = 'Remis'\n else:\n message = 'Przegrales'\n comp += 1\n messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +\n ' Komputer: ' + str(comp) + '\\nTwoj rzut ' + str(tplayer) +\n '\\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\\n' + message)\n\n\ndef No():\n messagebox.showinfo('Do zobaczenia')\n top.quit()\n\n\nw = tkinter.Label(top, text='Zagramy w kosci?\\n')\nB1 = tkinter.Button(top, text='Tak', command=Yes, width=10)\nB2 = tkinter.Button(top, text='Nie', command=No, width=10)\nw.grid(row=0, column=0)\nB1.grid(row=1, column=0)\nB2.grid(row=1, column=1)\ntop.mainloop()\n",
"step-4": "import tkinter\nfrom tkinter import messagebox\nfrom random import randint\ntplyer = 0\ntcomp = 0\nplayer = 0\ncomp = 0\ntop = tkinter.Tk()\ntop.resizable(width=False, height=False)\ntop.geometry('200x100')\n\n\ndef Yes():\n global player\n global comp\n tplayer = randint(1, 6)\n tcomp = randint(1, 6)\n message = ''\n if tplayer > tcomp:\n message = 'Wygrales!'\n player += 1\n elif tplayer == tcomp:\n message = 'Remis'\n else:\n message = 'Przegrales'\n comp += 1\n messagebox.showinfo('Wynik', 'Gracz: ' + str(player) +\n ' Komputer: ' + str(comp) + '\\nTwoj rzut ' + str(tplayer) +\n '\\n' + 'Przeciwnik wyrzucil ' + str(tcomp) + '\\n' + message)\n\n\ndef No():\n messagebox.showinfo('Do zobaczenia')\n top.quit()\n\n\nw = tkinter.Label(top, text='Zagramy w kosci?\\n')\nB1 = tkinter.Button(top, text='Tak', command=Yes, width=10)\nB2 = tkinter.Button(top, text='Nie', command=No, width=10)\nw.grid(row=0, column=0)\nB1.grid(row=1, column=0)\nB2.grid(row=1, column=1)\ntop.mainloop()\n",
"step-5": "import tkinter\nfrom tkinter import messagebox\nfrom random import randint\ntplyer = 0\ntcomp = 0\nplayer = 0\ncomp = 0\ntop = tkinter.Tk()\ntop.resizable(width = False, height =False)\ntop.geometry(\"200x100\")\ndef Yes():\n global player\n global comp\n tplayer = randint(1,6)\n tcomp = randint(1,6)\n message =\"\"\n if tplayer>tcomp:\n message = \"Wygrales!\"\n player+=1\n elif tplayer==tcomp:\n message = \"Remis\"\n else:\n message = \"Przegrales\"\n comp +=1\n messagebox.showinfo( \"Wynik\", \"Gracz: \"+str(player)+\" Komputer: \"+str(comp)+\"\\nTwoj rzut \"+str(tplayer)+\"\\n\"+\"Przeciwnik wyrzucil \"+str(tcomp)+\"\\n\"+message)\ndef No():\n messagebox.showinfo(\"Do zobaczenia\")\n top.quit()\nw = tkinter.Label(top,text = \"Zagramy w kosci?\\n\")\nB1 = tkinter.Button(top, text =\"Tak\", command = Yes,width = 10)\nB2 = tkinter.Button(top, text = \"Nie\", command = No,width = 10)\nw.grid(row = 0,column = 0)\nB1.grid(row = 1, column = 0)\nB2.grid(row = 1, column = 1)\ntop.mainloop()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from get_info import parse_matches as pm
def all_match_data(year):
"""
    Searches through the parse_matches data for all games in a specific season, prints them out with a game ID, and
returns the data in a list to the main program
:param year: Specific format YYYY between 2008 - 2017
:return: year_match_data
"""
year_match_data = []
match_year_data = pm()
for count in range(len(match_year_data)):
if year == match_year_data[count][1]:
year_match_data.append(match_year_data[count])
for count in range(len(year_match_data)):
print(
f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs '
f'{year_match_data[count][5]}')
return year_match_data
|
normal
|
{
"blob_id": "bc53af24bb46d2be3122e290c4732b312f4ebdf5",
"index": 5313,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'\n )\n return year_match_data\n",
"step-3": "from get_info import parse_matches as pm\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs {year_match_data[count][5]}'\n )\n return year_match_data\n",
"step-4": "from get_info import parse_matches as pm\n\n\ndef all_match_data(year):\n \"\"\"\n Searches through the parse_matches data for all games in a specific season prints them out with a game ID and\n returns the data in a list to the main program\n :param year: Specific format YYYY between 2008 - 2017\n :return: year_match_data\n \"\"\"\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs '\n f'{year_match_data[count][5]}')\n\n return year_match_data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
n = int(input())
min_number = sys.maxsize
max_number = -sys.maxsize
for i in range(0, n):
num = int(input())
if num > max_number:
max_number = num
if num < min_number:
min_number = num
print(f"Max number: {max_number}")
print(f"Min number: {min_number}")
|
normal
|
{
"blob_id": "ac6f2287390bdad8fe20cdc73c0063f685970cfb",
"index": 5289,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, n):\n num = int(input())\n if num > max_number:\n max_number = num\n if num < min_number:\n min_number = num\nprint(f'Max number: {max_number}')\nprint(f'Min number: {min_number}')\n",
"step-3": "<mask token>\nn = int(input())\nmin_number = sys.maxsize\nmax_number = -sys.maxsize\nfor i in range(0, n):\n num = int(input())\n if num > max_number:\n max_number = num\n if num < min_number:\n min_number = num\nprint(f'Max number: {max_number}')\nprint(f'Min number: {min_number}')\n",
"step-4": "import sys\nn = int(input())\nmin_number = sys.maxsize\nmax_number = -sys.maxsize\nfor i in range(0, n):\n num = int(input())\n if num > max_number:\n max_number = num\n if num < min_number:\n min_number = num\nprint(f'Max number: {max_number}')\nprint(f'Min number: {min_number}')\n",
"step-5": "import sys\r\n\r\nn = int(input())\r\nmin_number = sys.maxsize\r\nmax_number = -sys.maxsize\r\n\r\nfor i in range(0, n):\r\n num = int(input())\r\n if num > max_number:\r\n max_number = num\r\n\r\n if num < min_number:\r\n min_number = num\r\n\r\nprint(f\"Max number: {max_number}\")\r\nprint(f\"Min number: {min_number}\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Command(NoArgsCommand):
"""
Import directory structure into the filer ::
manage.py --path=/tmp/assets/images
manage.py --path=/tmp/assets/news --folder=images
"""
option_list = BaseCommand.option_list + (make_option('--path', action=
'store', dest='path', default=False, help=
'Import files located in the path into django-filer'), make_option(
'--folder', action='store', dest='base_folder', default=False, help
=
'Specify the destination folder in which the directory structure should be imported'
))
def handle_noargs(self, **options):
file_importer = FileImporter(**options)
file_importer.walker()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FileImporter(object):
<|reserved_special_token_0|>
def import_file(self, file_obj, folder):
"""
Create a File or an Image into the given folder
"""
created = False
for cls in MEDIA_MODELS:
if cls.matches_file_type(file_obj.name):
obj, created = cls.objects.get_or_create(original_filename=
file_obj.name, file=file_obj, folder=folder, is_public=
FILER_IS_PUBLIC_DEFAULT)
if created:
self.image_created += 1
if not created:
obj, created = File.objects.get_or_create(original_filename=
file_obj.name, file=file_obj, folder=folder, is_public=
FILER_IS_PUBLIC_DEFAULT)
if created:
self.file_created += 1
if self.verbosity >= 2:
print(
'file_created #%s / image_created #%s -- file : %s -- created : %s'
% (self.file_created, self.image_created, obj, created))
return obj
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Command(NoArgsCommand):
"""
Import directory structure into the filer ::
manage.py --path=/tmp/assets/images
manage.py --path=/tmp/assets/news --folder=images
"""
option_list = BaseCommand.option_list + (make_option('--path', action=
'store', dest='path', default=False, help=
'Import files located in the path into django-filer'), make_option(
'--folder', action='store', dest='base_folder', default=False, help
=
'Specify the destination folder in which the directory structure should be imported'
))
def handle_noargs(self, **options):
file_importer = FileImporter(**options)
file_importer.walker()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FileImporter(object):
def __init__(self, *args, **kwargs):
self.path = kwargs.get('path')
self.base_folder = kwargs.get('base_folder')
self.verbosity = int(kwargs.get('verbosity', 1))
self.file_created = 0
self.image_created = 0
self.folder_created = 0
def import_file(self, file_obj, folder):
"""
Create a File or an Image into the given folder
"""
created = False
for cls in MEDIA_MODELS:
if cls.matches_file_type(file_obj.name):
obj, created = cls.objects.get_or_create(original_filename=
file_obj.name, file=file_obj, folder=folder, is_public=
FILER_IS_PUBLIC_DEFAULT)
if created:
self.image_created += 1
if not created:
obj, created = File.objects.get_or_create(original_filename=
file_obj.name, file=file_obj, folder=folder, is_public=
FILER_IS_PUBLIC_DEFAULT)
if created:
self.file_created += 1
if self.verbosity >= 2:
print(
'file_created #%s / image_created #%s -- file : %s -- created : %s'
% (self.file_created, self.image_created, obj, created))
return obj
def get_or_create_folder(self, folder_names):
"""
Gets or creates a Folder based the list of folder names in hierarchical
order (like breadcrumbs).
get_or_create_folder(['root', 'subfolder', 'subsub folder'])
creates the folders with correct parent relations and returns the
'subsub folder' instance.
"""
if not len(folder_names):
return None
current_parent = None
for folder_name in folder_names:
current_parent, created = Folder.objects.get_or_create(name=
folder_name, parent=current_parent)
if created:
self.folder_created += 1
if self.verbosity >= 2:
print('folder_created #%s folder : %s -- created : %s' %
(self.folder_created, current_parent, created))
return current_parent
def walker(self, path=None, base_folder=None):
"""
This method walk a directory structure and create the
Folders and Files as they appear.
"""
path = path or self.path
base_folder = base_folder or self.base_folder
path = os.path.normpath(upath(path))
if base_folder:
base_folder = os.path.normpath(upath(base_folder))
print('The directory structure will be imported in %s' % (
base_folder,))
if self.verbosity >= 1:
print('Import the folders and files in %s' % (path,))
root_folder_name = os.path.basename(path)
for root, dirs, files in os.walk(path):
rel_folders = root.partition(path)[2].strip(os.path.sep).split(os
.path.sep)
while '' in rel_folders:
rel_folders.remove('')
if base_folder:
folder_names = base_folder.split('/') + [root_folder_name
] + rel_folders
else:
folder_names = [root_folder_name] + rel_folders
folder = self.get_or_create_folder(folder_names)
for file_obj in files:
dj_file = DjangoFile(open(os.path.join(root, file_obj)),
name=file_obj)
self.import_file(file_obj=dj_file, folder=folder)
if self.verbosity >= 1:
print(('folder_created #%s / file_created #%s / ' +
'image_created #%s') % (self.folder_created, self.
file_created, self.image_created))
class Command(NoArgsCommand):
"""
Import directory structure into the filer ::
manage.py --path=/tmp/assets/images
manage.py --path=/tmp/assets/news --folder=images
"""
option_list = BaseCommand.option_list + (make_option('--path', action=
'store', dest='path', default=False, help=
'Import files located in the path into django-filer'), make_option(
'--folder', action='store', dest='base_folder', default=False, help
=
'Specify the destination folder in which the directory structure should be imported'
))
def handle_noargs(self, **options):
file_importer = FileImporter(**options)
file_importer.walker()
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from filer.models.filemodels import File
from leonardo.module.media.models import *
from filer.settings import FILER_IS_PUBLIC_DEFAULT
from filer.utils.compatibility import upath
from optparse import make_option
import os
MEDIA_MODELS = [Image, Document, Vector, Video]
class FileImporter(object):
def __init__(self, *args, **kwargs):
self.path = kwargs.get('path')
self.base_folder = kwargs.get('base_folder')
self.verbosity = int(kwargs.get('verbosity', 1))
self.file_created = 0
self.image_created = 0
self.folder_created = 0
def import_file(self, file_obj, folder):
"""
Create a File or an Image into the given folder
"""
created = False
for cls in MEDIA_MODELS:
if cls.matches_file_type(file_obj.name):
obj, created = cls.objects.get_or_create(original_filename=
file_obj.name, file=file_obj, folder=folder, is_public=
FILER_IS_PUBLIC_DEFAULT)
if created:
self.image_created += 1
if not created:
obj, created = File.objects.get_or_create(original_filename=
file_obj.name, file=file_obj, folder=folder, is_public=
FILER_IS_PUBLIC_DEFAULT)
if created:
self.file_created += 1
if self.verbosity >= 2:
print(
'file_created #%s / image_created #%s -- file : %s -- created : %s'
% (self.file_created, self.image_created, obj, created))
return obj
def get_or_create_folder(self, folder_names):
"""
        Gets or creates a Folder based on the list of folder names in hierarchical
order (like breadcrumbs).
get_or_create_folder(['root', 'subfolder', 'subsub folder'])
creates the folders with correct parent relations and returns the
'subsub folder' instance.
"""
if not len(folder_names):
return None
current_parent = None
for folder_name in folder_names:
current_parent, created = Folder.objects.get_or_create(name=
folder_name, parent=current_parent)
if created:
self.folder_created += 1
if self.verbosity >= 2:
print('folder_created #%s folder : %s -- created : %s' %
(self.folder_created, current_parent, created))
return current_parent
def walker(self, path=None, base_folder=None):
"""
        This method walks a directory structure and creates the
Folders and Files as they appear.
"""
path = path or self.path
base_folder = base_folder or self.base_folder
path = os.path.normpath(upath(path))
if base_folder:
base_folder = os.path.normpath(upath(base_folder))
print('The directory structure will be imported in %s' % (
base_folder,))
if self.verbosity >= 1:
print('Import the folders and files in %s' % (path,))
root_folder_name = os.path.basename(path)
for root, dirs, files in os.walk(path):
rel_folders = root.partition(path)[2].strip(os.path.sep).split(os
.path.sep)
while '' in rel_folders:
rel_folders.remove('')
if base_folder:
folder_names = base_folder.split('/') + [root_folder_name
] + rel_folders
else:
folder_names = [root_folder_name] + rel_folders
folder = self.get_or_create_folder(folder_names)
for file_obj in files:
dj_file = DjangoFile(open(os.path.join(root, file_obj)),
name=file_obj)
self.import_file(file_obj=dj_file, folder=folder)
if self.verbosity >= 1:
print(('folder_created #%s / file_created #%s / ' +
'image_created #%s') % (self.folder_created, self.
file_created, self.image_created))
class Command(NoArgsCommand):
"""
Import directory structure into the filer ::
manage.py --path=/tmp/assets/images
manage.py --path=/tmp/assets/news --folder=images
"""
option_list = BaseCommand.option_list + (make_option('--path', action=
'store', dest='path', default=False, help=
'Import files located in the path into django-filer'), make_option(
'--folder', action='store', dest='base_folder', default=False, help
=
'Specify the destination folder in which the directory structure should be imported'
))
def handle_noargs(self, **options):
file_importer = FileImporter(**options)
file_importer.walker()
<|reserved_special_token_1|>
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from filer.models.filemodels import File
from leonardo.module.media.models import *
from filer.settings import FILER_IS_PUBLIC_DEFAULT
from filer.utils.compatibility import upath
from optparse import make_option
import os
MEDIA_MODELS = [Image, Document, Vector, Video]
class FileImporter(object):
def __init__(self, * args, **kwargs):
self.path = kwargs.get('path')
self.base_folder = kwargs.get('base_folder')
self.verbosity = int(kwargs.get('verbosity', 1))
self.file_created = 0
self.image_created = 0
self.folder_created = 0
def import_file(self, file_obj, folder):
"""
Create a File or an Image into the given folder
"""
created = False
for cls in MEDIA_MODELS:
if cls.matches_file_type(file_obj.name):
obj, created = cls.objects.get_or_create(
original_filename=file_obj.name,
file=file_obj,
folder=folder,
is_public=FILER_IS_PUBLIC_DEFAULT)
if created:
self.image_created += 1
if not created:
obj, created = File.objects.get_or_create(
original_filename=file_obj.name,
file=file_obj,
folder=folder,
is_public=FILER_IS_PUBLIC_DEFAULT)
if created:
self.file_created += 1
if self.verbosity >= 2:
print("file_created #%s / image_created #%s -- file : %s -- created : %s" % (self.file_created,
self.image_created,
obj, created))
return obj
def get_or_create_folder(self, folder_names):
"""
        Gets or creates a Folder based on the list of folder names in hierarchical
order (like breadcrumbs).
get_or_create_folder(['root', 'subfolder', 'subsub folder'])
creates the folders with correct parent relations and returns the
'subsub folder' instance.
"""
if not len(folder_names):
return None
current_parent = None
for folder_name in folder_names:
current_parent, created = Folder.objects.get_or_create(
name=folder_name, parent=current_parent)
if created:
self.folder_created += 1
if self.verbosity >= 2:
print("folder_created #%s folder : %s -- created : %s" % (self.folder_created,
current_parent, created))
return current_parent
def walker(self, path=None, base_folder=None):
"""
        This method walks a directory structure and creates the
Folders and Files as they appear.
"""
path = path or self.path
base_folder = base_folder or self.base_folder
# prevent trailing slashes and other inconsistencies on path.
path = os.path.normpath(upath(path))
if base_folder:
base_folder = os.path.normpath(upath(base_folder))
print("The directory structure will be imported in %s" % (base_folder,))
if self.verbosity >= 1:
print("Import the folders and files in %s" % (path,))
root_folder_name = os.path.basename(path)
for root, dirs, files in os.walk(path):
rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)
while '' in rel_folders:
rel_folders.remove('')
if base_folder:
folder_names = base_folder.split('/') + [root_folder_name] + rel_folders
else:
folder_names = [root_folder_name] + rel_folders
folder = self.get_or_create_folder(folder_names)
for file_obj in files:
dj_file = DjangoFile(open(os.path.join(root, file_obj)),
name=file_obj)
self.import_file(file_obj=dj_file, folder=folder)
if self.verbosity >= 1:
print(('folder_created #%s / file_created #%s / ' +
'image_created #%s') % (
self.folder_created, self.file_created,
self.image_created))
class Command(NoArgsCommand):
"""
Import directory structure into the filer ::
manage.py --path=/tmp/assets/images
manage.py --path=/tmp/assets/news --folder=images
"""
option_list = BaseCommand.option_list + (
make_option('--path',
action='store',
dest='path',
default=False,
help='Import files located in the path into django-filer'),
make_option('--folder',
action='store',
dest='base_folder',
default=False,
help='Specify the destination folder in which the directory structure should be imported'),
)
def handle_noargs(self, **options):
file_importer = FileImporter(**options)
file_importer.walker()
|
flexible
|
{
"blob_id": "864e9063ec1ed80cd1da3128a38633cbeb2f8bba",
"index": 3775,
"step-1": "<mask token>\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in which the directory structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-2": "<mask token>\n\n\nclass FileImporter(object):\n <mask token>\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n obj, created = cls.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\n 'file_created #%s / image_created #%s -- file : %s -- created : %s'\n % (self.file_created, self.image_created, obj, created))\n return obj\n <mask token>\n <mask token>\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in which the directory structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-3": "<mask token>\n\n\nclass FileImporter(object):\n\n def __init__(self, *args, **kwargs):\n self.path = kwargs.get('path')\n self.base_folder = kwargs.get('base_folder')\n self.verbosity = int(kwargs.get('verbosity', 1))\n self.file_created = 0\n self.image_created = 0\n self.folder_created = 0\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n obj, created = cls.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\n 'file_created #%s / image_created #%s -- file : %s -- created : %s'\n % (self.file_created, self.image_created, obj, created))\n return obj\n\n def get_or_create_folder(self, folder_names):\n \"\"\"\n Gets or creates a Folder based the list of folder names in hierarchical\n order (like breadcrumbs).\n\n get_or_create_folder(['root', 'subfolder', 'subsub folder'])\n\n creates the folders with correct parent relations and returns the\n 'subsub folder' instance.\n \"\"\"\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(name=\n folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print('folder_created #%s folder : %s -- created : %s' %\n (self.folder_created, current_parent, created))\n return current_parent\n\n def walker(self, path=None, base_folder=None):\n \"\"\"\n This method walk a directory structure and create the\n Folders and Files as they appear.\n \"\"\"\n path = path or self.path\n base_folder = base_folder or self.base_folder\n path = os.path.normpath(upath(path))\n if base_folder:\n base_folder = os.path.normpath(upath(base_folder))\n print('The directory structure will be imported in %s' % (\n base_folder,))\n if self.verbosity >= 1:\n print('Import the folders and files in %s' % (path,))\n root_folder_name = os.path.basename(path)\n for root, dirs, files in os.walk(path):\n rel_folders = root.partition(path)[2].strip(os.path.sep).split(os\n .path.sep)\n while '' in rel_folders:\n rel_folders.remove('')\n if base_folder:\n folder_names = base_folder.split('/') + [root_folder_name\n ] + rel_folders\n else:\n folder_names = [root_folder_name] + rel_folders\n folder = self.get_or_create_folder(folder_names)\n for file_obj in files:\n dj_file = DjangoFile(open(os.path.join(root, file_obj)),\n name=file_obj)\n self.import_file(file_obj=dj_file, folder=folder)\n if self.verbosity >= 1:\n print(('folder_created #%s / file_created #%s / ' +\n 'image_created #%s') % (self.folder_created, self.\n file_created, self.image_created))\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in 
which the directory structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-4": "from __future__ import unicode_literals\nfrom django.core.files import File as DjangoFile\nfrom django.core.management.base import BaseCommand, NoArgsCommand\nfrom filer.models.filemodels import File\nfrom leonardo.module.media.models import *\nfrom filer.settings import FILER_IS_PUBLIC_DEFAULT\nfrom filer.utils.compatibility import upath\nfrom optparse import make_option\nimport os\nMEDIA_MODELS = [Image, Document, Vector, Video]\n\n\nclass FileImporter(object):\n\n def __init__(self, *args, **kwargs):\n self.path = kwargs.get('path')\n self.base_folder = kwargs.get('base_folder')\n self.verbosity = int(kwargs.get('verbosity', 1))\n self.file_created = 0\n self.image_created = 0\n self.folder_created = 0\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n obj, created = cls.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\n 'file_created #%s / image_created #%s -- file : %s -- created : %s'\n % (self.file_created, self.image_created, obj, created))\n return obj\n\n def get_or_create_folder(self, folder_names):\n \"\"\"\n Gets or creates a Folder based the list of folder names in hierarchical\n order (like breadcrumbs).\n\n get_or_create_folder(['root', 'subfolder', 'subsub folder'])\n\n creates the folders with correct parent relations and returns the\n 'subsub folder' instance.\n \"\"\"\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(name=\n folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print('folder_created #%s folder : %s -- created : %s' %\n (self.folder_created, current_parent, created))\n return current_parent\n\n def walker(self, path=None, base_folder=None):\n \"\"\"\n This method walk a directory structure and create the\n Folders and Files as they appear.\n \"\"\"\n path = path or self.path\n base_folder = base_folder or self.base_folder\n path = os.path.normpath(upath(path))\n if base_folder:\n base_folder = os.path.normpath(upath(base_folder))\n print('The directory structure will be imported in %s' % (\n base_folder,))\n if self.verbosity >= 1:\n print('Import the folders and files in %s' % (path,))\n root_folder_name = os.path.basename(path)\n for root, dirs, files in os.walk(path):\n rel_folders = root.partition(path)[2].strip(os.path.sep).split(os\n .path.sep)\n while '' in rel_folders:\n rel_folders.remove('')\n if base_folder:\n folder_names = base_folder.split('/') + [root_folder_name\n ] + rel_folders\n else:\n folder_names = [root_folder_name] + rel_folders\n folder = self.get_or_create_folder(folder_names)\n for file_obj in files:\n dj_file = DjangoFile(open(os.path.join(root, file_obj)),\n name=file_obj)\n self.import_file(file_obj=dj_file, folder=folder)\n if self.verbosity >= 1:\n print(('folder_created #%s / file_created #%s / ' +\n 'image_created #%s') % (self.folder_created, self.\n file_created, self.image_created))\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory 
structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in which the directory structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-5": "#-*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.core.files import File as DjangoFile\nfrom django.core.management.base import BaseCommand, NoArgsCommand\nfrom filer.models.filemodels import File\nfrom leonardo.module.media.models import *\nfrom filer.settings import FILER_IS_PUBLIC_DEFAULT\nfrom filer.utils.compatibility import upath\nfrom optparse import make_option\nimport os\n\nMEDIA_MODELS = [Image, Document, Vector, Video]\n\n\nclass FileImporter(object):\n\n def __init__(self, * args, **kwargs):\n self.path = kwargs.get('path')\n self.base_folder = kwargs.get('base_folder')\n self.verbosity = int(kwargs.get('verbosity', 1))\n self.file_created = 0\n self.image_created = 0\n self.folder_created = 0\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n\n obj, created = cls.objects.get_or_create(\n original_filename=file_obj.name,\n file=file_obj,\n folder=folder,\n is_public=FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(\n original_filename=file_obj.name,\n file=file_obj,\n folder=folder,\n is_public=FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\"file_created #%s / image_created #%s -- file : %s -- created : %s\" % (self.file_created,\n self.image_created,\n obj, created))\n return obj\n\n def get_or_create_folder(self, folder_names):\n \"\"\"\n Gets or creates a Folder based the list of folder names in hierarchical\n order (like breadcrumbs).\n\n get_or_create_folder(['root', 'subfolder', 'subsub folder'])\n\n creates the folders with correct parent relations and returns the\n 'subsub folder' instance.\n \"\"\"\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(\n name=folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print(\"folder_created #%s folder : %s -- created : %s\" % (self.folder_created,\n current_parent, created))\n return current_parent\n\n def walker(self, path=None, base_folder=None):\n \"\"\"\n This method walk a directory structure and create the\n Folders and Files as they appear.\n \"\"\"\n path = path or self.path\n base_folder = base_folder or self.base_folder\n # prevent trailing slashes and other inconsistencies on path.\n path = os.path.normpath(upath(path))\n if base_folder:\n base_folder = os.path.normpath(upath(base_folder))\n print(\"The directory structure will be imported in %s\" % (base_folder,))\n if self.verbosity >= 1:\n print(\"Import the folders and files in %s\" % (path,))\n root_folder_name = os.path.basename(path)\n for root, dirs, files in os.walk(path):\n rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)\n while '' in rel_folders:\n rel_folders.remove('')\n if base_folder:\n folder_names = base_folder.split('/') + [root_folder_name] + rel_folders\n else:\n folder_names = [root_folder_name] + rel_folders\n folder = self.get_or_create_folder(folder_names)\n for file_obj in files:\n dj_file = DjangoFile(open(os.path.join(root, file_obj)),\n name=file_obj)\n self.import_file(file_obj=dj_file, folder=folder)\n if self.verbosity >= 1:\n print(('folder_created #%s / file_created #%s / ' +\n 'image_created #%s') % (\n self.folder_created, 
self.file_created,\n self.image_created))\n\n\nclass Command(NoArgsCommand):\n\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n\n option_list = BaseCommand.option_list + (\n make_option('--path',\n action='store',\n dest='path',\n default=False,\n help='Import files located in the path into django-filer'),\n make_option('--folder',\n action='store',\n dest='base_folder',\n default=False,\n help='Specify the destination folder in which the directory structure should be imported'),\n )\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-ids": [
4,
6,
9,
11,
12
]
}
|
[
4,
6,
9,
11,
12
] |
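A usage sketch for the record above, not part of the dataset itself: the management command is a thin wrapper over FileImporter, so the same walk can be driven from a shell or from Python. This assumes a configured Django project with django-filer installed; the command name and filesystem paths below are hypothetical.

# Shell form, mirroring the optparse options on the Command class:
#   python manage.py import_files --path=/tmp/assets --folder=media
# Programmatic form:
import django

django.setup()  # requires DJANGO_SETTINGS_MODULE to point at a real project
importer = FileImporter(path='/tmp/assets', base_folder='media', verbosity=2)
importer.walker()  # mirrors /tmp/assets into filer Folders under 'media'

Since walker() splits base_folder on '/', a nested destination such as 'media/uploads' also resolves to the right Folder chain.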
<|reserved_special_token_0|>
def part_one():
"""Solution to Part 1"""
return ''.join(networkx.lexicographical_topological_sort(G))
def part_two():
"""Solution to Part 2"""
tasks = {}
current_time = 0
while G.nodes():
candidate_next_tasks = [task for task in G.nodes() if task not in
tasks.keys() and G.in_degree(task) == 0]
if candidate_next_tasks and len(tasks) < 5:
next_task = sorted(candidate_next_tasks)[0]
tasks[next_task] = ord(next_task) - 4
else:
min_task_time = min(tasks.values())
current_time += min_task_time
completed_task = dict(zip(tasks.values(), tasks.keys()))[
min_task_time]
tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=
completed_task}
G.remove_node(completed_task)
return current_time
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('puzzle_input') as f:
for line in f.read().split('\n'):
match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)
G.add_edge(match.group('pre'), match.group('post'))
def part_one():
"""Solution to Part 1"""
return ''.join(networkx.lexicographical_topological_sort(G))
def part_two():
"""Solution to Part 2"""
tasks = {}
current_time = 0
while G.nodes():
candidate_next_tasks = [task for task in G.nodes() if task not in
tasks.keys() and G.in_degree(task) == 0]
if candidate_next_tasks and len(tasks) < 5:
next_task = sorted(candidate_next_tasks)[0]
tasks[next_task] = ord(next_task) - 4
else:
min_task_time = min(tasks.values())
current_time += min_task_time
completed_task = dict(zip(tasks.values(), tasks.keys()))[
min_task_time]
tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=
completed_task}
G.remove_node(completed_task)
return current_time
<|reserved_special_token_1|>
<|reserved_special_token_0|>
G = networkx.DiGraph()
with open('puzzle_input') as f:
for line in f.read().split('\n'):
match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)
G.add_edge(match.group('pre'), match.group('post'))
def part_one():
"""Solution to Part 1"""
return ''.join(networkx.lexicographical_topological_sort(G))
def part_two():
"""Solution to Part 2"""
tasks = {}
current_time = 0
while G.nodes():
candidate_next_tasks = [task for task in G.nodes() if task not in
tasks.keys() and G.in_degree(task) == 0]
if candidate_next_tasks and len(tasks) < 5:
next_task = sorted(candidate_next_tasks)[0]
tasks[next_task] = ord(next_task) - 4
else:
min_task_time = min(tasks.values())
current_time += min_task_time
completed_task = dict(zip(tasks.values(), tasks.keys()))[
min_task_time]
tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=
completed_task}
G.remove_node(completed_task)
return current_time
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import networkx
import re
G = networkx.DiGraph()
with open('puzzle_input') as f:
for line in f.read().split('\n'):
match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)
G.add_edge(match.group('pre'), match.group('post'))
def part_one():
"""Solution to Part 1"""
return ''.join(networkx.lexicographical_topological_sort(G))
def part_two():
"""Solution to Part 2"""
tasks = {}
current_time = 0
while G.nodes():
candidate_next_tasks = [task for task in G.nodes() if task not in
tasks.keys() and G.in_degree(task) == 0]
if candidate_next_tasks and len(tasks) < 5:
next_task = sorted(candidate_next_tasks)[0]
tasks[next_task] = ord(next_task) - 4
else:
min_task_time = min(tasks.values())
current_time += min_task_time
completed_task = dict(zip(tasks.values(), tasks.keys()))[
min_task_time]
tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=
completed_task}
G.remove_node(completed_task)
return current_time
<|reserved_special_token_1|>
# coding=utf-8
"""Advent of Code 2018, Day 7"""
import networkx
import re
G = networkx.DiGraph()
with open("puzzle_input") as f:
for line in f.read().split("\n"):
match = re.search("Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])", line)
G.add_edge(match.group("pre"), match.group("post"))
def part_one():
"""Solution to Part 1"""
return "".join(networkx.lexicographical_topological_sort(G))
def part_two():
"""Solution to Part 2"""
tasks = {}
current_time = 0
while G.nodes():
# noinspection PyCallingNonCallable
candidate_next_tasks = [task for task in G.nodes()
if task not in tasks.keys() and G.in_degree(task) == 0]
if candidate_next_tasks and len(tasks) < 5:
next_task = sorted(candidate_next_tasks)[0]
tasks[next_task] = ord(next_task) - 4
else:
min_task_time = min(tasks.values())
current_time += min_task_time
completed_task = dict(zip(tasks.values(), tasks.keys()))[min_task_time]
tasks = {k: v - min_task_time for k, v in tasks.items() if k != completed_task}
G.remove_node(completed_task)
return current_time
|
flexible
|
{
"blob_id": "1c5884c10ac0b6a3335f8e677007fc52311245e2",
"index": 7603,
"step-1": "<mask token>\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-2": "<mask token>\nwith open('puzzle_input') as f:\n for line in f.read().split('\\n'):\n match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)\n G.add_edge(match.group('pre'), match.group('post'))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-3": "<mask token>\nG = networkx.DiGraph()\nwith open('puzzle_input') as f:\n for line in f.read().split('\\n'):\n match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)\n G.add_edge(match.group('pre'), match.group('post'))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-4": "<mask token>\nimport networkx\nimport re\nG = networkx.DiGraph()\nwith open('puzzle_input') as f:\n for line in f.read().split('\\n'):\n match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)\n G.add_edge(match.group('pre'), match.group('post'))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-5": "# coding=utf-8\n\"\"\"Advent of Code 2018, Day 7\"\"\"\n\nimport networkx\nimport re\n\nG = networkx.DiGraph()\nwith open(\"puzzle_input\") as f:\n for line in f.read().split(\"\\n\"):\n match = re.search(\"Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])\", line)\n G.add_edge(match.group(\"pre\"), match.group(\"post\"))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return \"\".join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n # noinspection PyCallingNonCallable\n candidate_next_tasks = [task for task in G.nodes()\n if task not in tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[min_task_time]\n tasks = {k: v - min_task_time for k, v in tasks.items() if k != completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MQTT handler for Event subscriptions.
"""
import json
import time
import tornado.gen
import tornado.ioloop
from hbmqtt.mqtt.constants import QOS_0
from tornado.queues import QueueFull
from wotpy.protocols.mqtt.handlers.base import BaseMQTTHandler
from wotpy.protocols.mqtt.handlers.subs import InteractionsSubscriber
from wotpy.utils.utils import to_json_obj
from wotpy.wot.enums import InteractionTypes
class EventMQTTHandler(BaseMQTTHandler):
"""MQTT handler for Event subscriptions."""
DEFAULT_CALLBACK_MS = 2000
DEFAULT_JITTER = 0.2
def __init__(self, mqtt_server, qos=QOS_0, callback_ms=None):
super(EventMQTTHandler, self).__init__(mqtt_server)
callback_ms = self.DEFAULT_CALLBACK_MS if callback_ms is None else callback_ms
self._qos = qos
self._callback_ms = callback_ms
self._subs = {}
self._interaction_subscriber = InteractionsSubscriber(
interaction_type=InteractionTypes.EVENT,
server=self.mqtt_server,
on_next_builder=self._build_on_next)
@tornado.gen.coroutine
def refresh_subs():
self._interaction_subscriber.refresh()
self._periodic_refresh_subs = tornado.ioloop.PeriodicCallback(
refresh_subs, self._callback_ms, jitter=self.DEFAULT_JITTER)
def build_event_topic(self, thing, event):
"""Returns the MQTT topic for Event emissions."""
return "{}/event/{}/{}".format(
self.servient_id,
thing.url_name,
event.url_name)
@tornado.gen.coroutine
def init(self):
"""Initializes the MQTT handler.
Called when the MQTT runner starts."""
self._interaction_subscriber.refresh()
self._periodic_refresh_subs.start()
yield None
@tornado.gen.coroutine
def teardown(self):
"""Destroys the MQTT handler.
Called when the MQTT runner stops."""
self._periodic_refresh_subs.stop()
self._interaction_subscriber.dispose()
yield None
def _build_on_next(self, exp_thing, event):
"""Builds the on_next function to use when subscribing to the given Event."""
topic = self.build_event_topic(exp_thing, event)
def on_next(item):
try:
data = {
"name": item.name,
"data": to_json_obj(item.data),
"timestamp": int(time.time() * 1000)
}
self.queue.put_nowait({
"topic": topic,
"data": json.dumps(data).encode(),
"qos": self._qos
})
except QueueFull:
pass
return on_next
|
normal
|
{
"blob_id": "b3f72bc12f85724ddcdaf1c151fd2a68b29432e8",
"index": 6545,
"step-1": "<mask token>\n\n\nclass EventMQTTHandler(BaseMQTTHandler):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, mqtt_server, qos=QOS_0, callback_ms=None):\n super(EventMQTTHandler, self).__init__(mqtt_server)\n callback_ms = (self.DEFAULT_CALLBACK_MS if callback_ms is None else\n callback_ms)\n self._qos = qos\n self._callback_ms = callback_ms\n self._subs = {}\n self._interaction_subscriber = InteractionsSubscriber(interaction_type\n =InteractionTypes.EVENT, server=self.mqtt_server,\n on_next_builder=self._build_on_next)\n\n @tornado.gen.coroutine\n def refresh_subs():\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs = tornado.ioloop.PeriodicCallback(\n refresh_subs, self._callback_ms, jitter=self.DEFAULT_JITTER)\n\n def build_event_topic(self, thing, event):\n \"\"\"Returns the MQTT topic for Event emissions.\"\"\"\n return '{}/event/{}/{}'.format(self.servient_id, thing.url_name,\n event.url_name)\n\n @tornado.gen.coroutine\n def init(self):\n \"\"\"Initializes the MQTT handler.\n Called when the MQTT runner starts.\"\"\"\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs.start()\n yield None\n\n @tornado.gen.coroutine\n def teardown(self):\n \"\"\"Destroys the MQTT handler.\n Called when the MQTT runner stops.\"\"\"\n self._periodic_refresh_subs.stop()\n self._interaction_subscriber.dispose()\n yield None\n\n def _build_on_next(self, exp_thing, event):\n \"\"\"Builds the on_next function to use when subscribing to the given Event.\"\"\"\n topic = self.build_event_topic(exp_thing, event)\n\n def on_next(item):\n try:\n data = {'name': item.name, 'data': to_json_obj(item.data),\n 'timestamp': int(time.time() * 1000)}\n self.queue.put_nowait({'topic': topic, 'data': json.dumps(\n data).encode(), 'qos': self._qos})\n except QueueFull:\n pass\n return on_next\n",
"step-2": "<mask token>\n\n\nclass EventMQTTHandler(BaseMQTTHandler):\n <mask token>\n DEFAULT_CALLBACK_MS = 2000\n DEFAULT_JITTER = 0.2\n\n def __init__(self, mqtt_server, qos=QOS_0, callback_ms=None):\n super(EventMQTTHandler, self).__init__(mqtt_server)\n callback_ms = (self.DEFAULT_CALLBACK_MS if callback_ms is None else\n callback_ms)\n self._qos = qos\n self._callback_ms = callback_ms\n self._subs = {}\n self._interaction_subscriber = InteractionsSubscriber(interaction_type\n =InteractionTypes.EVENT, server=self.mqtt_server,\n on_next_builder=self._build_on_next)\n\n @tornado.gen.coroutine\n def refresh_subs():\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs = tornado.ioloop.PeriodicCallback(\n refresh_subs, self._callback_ms, jitter=self.DEFAULT_JITTER)\n\n def build_event_topic(self, thing, event):\n \"\"\"Returns the MQTT topic for Event emissions.\"\"\"\n return '{}/event/{}/{}'.format(self.servient_id, thing.url_name,\n event.url_name)\n\n @tornado.gen.coroutine\n def init(self):\n \"\"\"Initializes the MQTT handler.\n Called when the MQTT runner starts.\"\"\"\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs.start()\n yield None\n\n @tornado.gen.coroutine\n def teardown(self):\n \"\"\"Destroys the MQTT handler.\n Called when the MQTT runner stops.\"\"\"\n self._periodic_refresh_subs.stop()\n self._interaction_subscriber.dispose()\n yield None\n\n def _build_on_next(self, exp_thing, event):\n \"\"\"Builds the on_next function to use when subscribing to the given Event.\"\"\"\n topic = self.build_event_topic(exp_thing, event)\n\n def on_next(item):\n try:\n data = {'name': item.name, 'data': to_json_obj(item.data),\n 'timestamp': int(time.time() * 1000)}\n self.queue.put_nowait({'topic': topic, 'data': json.dumps(\n data).encode(), 'qos': self._qos})\n except QueueFull:\n pass\n return on_next\n",
"step-3": "<mask token>\n\n\nclass EventMQTTHandler(BaseMQTTHandler):\n \"\"\"MQTT handler for Event subscriptions.\"\"\"\n DEFAULT_CALLBACK_MS = 2000\n DEFAULT_JITTER = 0.2\n\n def __init__(self, mqtt_server, qos=QOS_0, callback_ms=None):\n super(EventMQTTHandler, self).__init__(mqtt_server)\n callback_ms = (self.DEFAULT_CALLBACK_MS if callback_ms is None else\n callback_ms)\n self._qos = qos\n self._callback_ms = callback_ms\n self._subs = {}\n self._interaction_subscriber = InteractionsSubscriber(interaction_type\n =InteractionTypes.EVENT, server=self.mqtt_server,\n on_next_builder=self._build_on_next)\n\n @tornado.gen.coroutine\n def refresh_subs():\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs = tornado.ioloop.PeriodicCallback(\n refresh_subs, self._callback_ms, jitter=self.DEFAULT_JITTER)\n\n def build_event_topic(self, thing, event):\n \"\"\"Returns the MQTT topic for Event emissions.\"\"\"\n return '{}/event/{}/{}'.format(self.servient_id, thing.url_name,\n event.url_name)\n\n @tornado.gen.coroutine\n def init(self):\n \"\"\"Initializes the MQTT handler.\n Called when the MQTT runner starts.\"\"\"\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs.start()\n yield None\n\n @tornado.gen.coroutine\n def teardown(self):\n \"\"\"Destroys the MQTT handler.\n Called when the MQTT runner stops.\"\"\"\n self._periodic_refresh_subs.stop()\n self._interaction_subscriber.dispose()\n yield None\n\n def _build_on_next(self, exp_thing, event):\n \"\"\"Builds the on_next function to use when subscribing to the given Event.\"\"\"\n topic = self.build_event_topic(exp_thing, event)\n\n def on_next(item):\n try:\n data = {'name': item.name, 'data': to_json_obj(item.data),\n 'timestamp': int(time.time() * 1000)}\n self.queue.put_nowait({'topic': topic, 'data': json.dumps(\n data).encode(), 'qos': self._qos})\n except QueueFull:\n pass\n return on_next\n",
"step-4": "<mask token>\nimport json\nimport time\nimport tornado.gen\nimport tornado.ioloop\nfrom hbmqtt.mqtt.constants import QOS_0\nfrom tornado.queues import QueueFull\nfrom wotpy.protocols.mqtt.handlers.base import BaseMQTTHandler\nfrom wotpy.protocols.mqtt.handlers.subs import InteractionsSubscriber\nfrom wotpy.utils.utils import to_json_obj\nfrom wotpy.wot.enums import InteractionTypes\n\n\nclass EventMQTTHandler(BaseMQTTHandler):\n \"\"\"MQTT handler for Event subscriptions.\"\"\"\n DEFAULT_CALLBACK_MS = 2000\n DEFAULT_JITTER = 0.2\n\n def __init__(self, mqtt_server, qos=QOS_0, callback_ms=None):\n super(EventMQTTHandler, self).__init__(mqtt_server)\n callback_ms = (self.DEFAULT_CALLBACK_MS if callback_ms is None else\n callback_ms)\n self._qos = qos\n self._callback_ms = callback_ms\n self._subs = {}\n self._interaction_subscriber = InteractionsSubscriber(interaction_type\n =InteractionTypes.EVENT, server=self.mqtt_server,\n on_next_builder=self._build_on_next)\n\n @tornado.gen.coroutine\n def refresh_subs():\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs = tornado.ioloop.PeriodicCallback(\n refresh_subs, self._callback_ms, jitter=self.DEFAULT_JITTER)\n\n def build_event_topic(self, thing, event):\n \"\"\"Returns the MQTT topic for Event emissions.\"\"\"\n return '{}/event/{}/{}'.format(self.servient_id, thing.url_name,\n event.url_name)\n\n @tornado.gen.coroutine\n def init(self):\n \"\"\"Initializes the MQTT handler.\n Called when the MQTT runner starts.\"\"\"\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs.start()\n yield None\n\n @tornado.gen.coroutine\n def teardown(self):\n \"\"\"Destroys the MQTT handler.\n Called when the MQTT runner stops.\"\"\"\n self._periodic_refresh_subs.stop()\n self._interaction_subscriber.dispose()\n yield None\n\n def _build_on_next(self, exp_thing, event):\n \"\"\"Builds the on_next function to use when subscribing to the given Event.\"\"\"\n topic = self.build_event_topic(exp_thing, event)\n\n def on_next(item):\n try:\n data = {'name': item.name, 'data': to_json_obj(item.data),\n 'timestamp': int(time.time() * 1000)}\n self.queue.put_nowait({'topic': topic, 'data': json.dumps(\n data).encode(), 'qos': self._qos})\n except QueueFull:\n pass\n return on_next\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMQTT handler for Event subscriptions.\n\"\"\"\n\nimport json\nimport time\n\nimport tornado.gen\nimport tornado.ioloop\nfrom hbmqtt.mqtt.constants import QOS_0\nfrom tornado.queues import QueueFull\n\nfrom wotpy.protocols.mqtt.handlers.base import BaseMQTTHandler\nfrom wotpy.protocols.mqtt.handlers.subs import InteractionsSubscriber\nfrom wotpy.utils.utils import to_json_obj\nfrom wotpy.wot.enums import InteractionTypes\n\n\nclass EventMQTTHandler(BaseMQTTHandler):\n \"\"\"MQTT handler for Event subscriptions.\"\"\"\n\n DEFAULT_CALLBACK_MS = 2000\n DEFAULT_JITTER = 0.2\n\n def __init__(self, mqtt_server, qos=QOS_0, callback_ms=None):\n super(EventMQTTHandler, self).__init__(mqtt_server)\n\n callback_ms = self.DEFAULT_CALLBACK_MS if callback_ms is None else callback_ms\n\n self._qos = qos\n self._callback_ms = callback_ms\n self._subs = {}\n\n self._interaction_subscriber = InteractionsSubscriber(\n interaction_type=InteractionTypes.EVENT,\n server=self.mqtt_server,\n on_next_builder=self._build_on_next)\n\n @tornado.gen.coroutine\n def refresh_subs():\n self._interaction_subscriber.refresh()\n\n self._periodic_refresh_subs = tornado.ioloop.PeriodicCallback(\n refresh_subs, self._callback_ms, jitter=self.DEFAULT_JITTER)\n\n def build_event_topic(self, thing, event):\n \"\"\"Returns the MQTT topic for Event emissions.\"\"\"\n\n return \"{}/event/{}/{}\".format(\n self.servient_id,\n thing.url_name,\n event.url_name)\n\n @tornado.gen.coroutine\n def init(self):\n \"\"\"Initializes the MQTT handler.\n Called when the MQTT runner starts.\"\"\"\n\n self._interaction_subscriber.refresh()\n self._periodic_refresh_subs.start()\n\n yield None\n\n @tornado.gen.coroutine\n def teardown(self):\n \"\"\"Destroys the MQTT handler.\n Called when the MQTT runner stops.\"\"\"\n\n self._periodic_refresh_subs.stop()\n self._interaction_subscriber.dispose()\n\n yield None\n\n def _build_on_next(self, exp_thing, event):\n \"\"\"Builds the on_next function to use when subscribing to the given Event.\"\"\"\n\n topic = self.build_event_topic(exp_thing, event)\n\n def on_next(item):\n try:\n data = {\n \"name\": item.name,\n \"data\": to_json_obj(item.data),\n \"timestamp\": int(time.time() * 1000)\n }\n\n self.queue.put_nowait({\n \"topic\": topic,\n \"data\": json.dumps(data).encode(),\n \"qos\": self._qos\n })\n except QueueFull:\n pass\n\n return on_next\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
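The handler in the record above never publishes directly; its observable contract is the topic/payload pair it pushes onto the runner's queue. A sketch of that shape, using 'servient', 'mything', and 'overheated' as hypothetical servient id and Thing/Event URL names:

import json
import time

topic = '{}/event/{}/{}'.format('servient', 'mything', 'overheated')
payload = json.dumps({
    'name': 'overheated',
    'data': {'degrees': 27.5},  # stand-in for to_json_obj(item.data)
    'timestamp': int(time.time() * 1000),
}).encode()
# The MQTT runner later publishes `payload` on `topic` at the handler's QoS
# (QOS_0 by default); QueueFull is swallowed, so emissions are dropped rather
# than blocking the event source.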
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Category', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('title', models.CharField(max_length=
20))]), migrations.CreateModel(name='Task', fields=[('id', models.
BigAutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('title', models.CharField(max_length=50, null
=True)), ('description', models.CharField(max_length=500)), (
'priority', models.IntegerField(choices=[(0, 'unimportant'), (1,
'insignificant'), (2, 'important'), (3, 'Necessary')], default=0)),
('status', models.CharField(choices=[('deleted', 'deleted'), (
'doing', 'doing'), ('done', 'done'), ('expire', 'expire'), (
'archive', 'archive')], default='doing', max_length=10)), (
'expired', models.DateTimeField(blank=True, null=True)), ('created',
models.DateTimeField(auto_now_add=True)), ('updated', models.
DateTimeField(auto_now=True)), ('category', models.ManyToManyField(
default='unknown', to='todo.Category'))])]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Category', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('title', models.CharField(max_length=
20))]), migrations.CreateModel(name='Task', fields=[('id', models.
BigAutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('title', models.CharField(max_length=50, null
=True)), ('description', models.CharField(max_length=500)), (
'priority', models.IntegerField(choices=[(0, 'unimportant'), (1,
'insignificant'), (2, 'important'), (3, 'Necessary')], default=0)),
('status', models.CharField(choices=[('deleted', 'deleted'), (
'doing', 'doing'), ('done', 'done'), ('expire', 'expire'), (
'archive', 'archive')], default='doing', max_length=10)), (
'expired', models.DateTimeField(blank=True, null=True)), ('created',
models.DateTimeField(auto_now_add=True)), ('updated', models.
DateTimeField(auto_now=True)), ('category', models.ManyToManyField(
default='unknown', to='todo.Category'))])]
<|reserved_special_token_1|>
# Generated by Django 3.2.5 on 2021-07-27 17:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, null=True)),
('description', models.CharField(max_length=500)),
('priority', models.IntegerField(choices=[(0, 'unimportant'), (1, 'insignificant'), (2, 'important'), (3, 'Necessary')], default=0)),
('status', models.CharField(choices=[('deleted', 'deleted'), ('doing', 'doing'), ('done', 'done'), ('expire', 'expire'), ('archive', 'archive')], default='doing', max_length=10)),
('expired', models.DateTimeField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ManyToManyField(default='unknown', to='todo.Category')),
],
),
]
|
flexible
|
{
"blob_id": "d145f4c061c8f364756012832a07adc305e35e5c",
"index": 5772,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Category', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('title', models.CharField(max_length=\n 20))]), migrations.CreateModel(name='Task', fields=[('id', models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(max_length=50, null\n =True)), ('description', models.CharField(max_length=500)), (\n 'priority', models.IntegerField(choices=[(0, 'unimportant'), (1,\n 'insignificant'), (2, 'important'), (3, 'Necessary')], default=0)),\n ('status', models.CharField(choices=[('deleted', 'deleted'), (\n 'doing', 'doing'), ('done', 'done'), ('expire', 'expire'), (\n 'archive', 'archive')], default='doing', max_length=10)), (\n 'expired', models.DateTimeField(blank=True, null=True)), ('created',\n models.DateTimeField(auto_now_add=True)), ('updated', models.\n DateTimeField(auto_now=True)), ('category', models.ManyToManyField(\n default='unknown', to='todo.Category'))])]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Category', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('title', models.CharField(max_length=\n 20))]), migrations.CreateModel(name='Task', fields=[('id', models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(max_length=50, null\n =True)), ('description', models.CharField(max_length=500)), (\n 'priority', models.IntegerField(choices=[(0, 'unimportant'), (1,\n 'insignificant'), (2, 'important'), (3, 'Necessary')], default=0)),\n ('status', models.CharField(choices=[('deleted', 'deleted'), (\n 'doing', 'doing'), ('done', 'done'), ('expire', 'expire'), (\n 'archive', 'archive')], default='doing', max_length=10)), (\n 'expired', models.DateTimeField(blank=True, null=True)), ('created',\n models.DateTimeField(auto_now_add=True)), ('updated', models.\n DateTimeField(auto_now=True)), ('category', models.ManyToManyField(\n default='unknown', to='todo.Category'))])]\n",
"step-5": "# Generated by Django 3.2.5 on 2021-07-27 17:12\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Category',\r\n fields=[\r\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('title', models.CharField(max_length=20)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='Task',\r\n fields=[\r\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('title', models.CharField(max_length=50, null=True)),\r\n ('description', models.CharField(max_length=500)),\r\n ('priority', models.IntegerField(choices=[(0, 'unimportant'), (1, 'insignificant'), (2, 'important'), (3, 'Necessary')], default=0)),\r\n ('status', models.CharField(choices=[('deleted', 'deleted'), ('doing', 'doing'), ('done', 'done'), ('expire', 'expire'), ('archive', 'archive')], default='doing', max_length=10)),\r\n ('expired', models.DateTimeField(blank=True, null=True)),\r\n ('created', models.DateTimeField(auto_now_add=True)),\r\n ('updated', models.DateTimeField(auto_now=True)),\r\n ('category', models.ManyToManyField(default='unknown', to='todo.Category')),\r\n ],\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
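The migration in the record above creates the category and task tables for a `todo` app. A short sketch of exercising the resulting schema after `python manage.py migrate todo`; the model import path is assumed from the app label:

from todo.models import Category, Task

work = Category.objects.create(title='work')
task = Task.objects.create(title='ship release', description='cut v1.0',
                           priority=3, status='doing')
task.category.add(work)  # ManyToManyField rows attach only after the Task row exists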
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.mkdir(base_dir)
<|reserved_special_token_0|>
os.mkdir(train_dir)
<|reserved_special_token_0|>
os.mkdir(validation_dir)
<|reserved_special_token_0|>
os.mkdir(test_dir)
<|reserved_special_token_0|>
os.mkdir(train_cats_dir)
<|reserved_special_token_0|>
os.mkdir(train_dogs_dir)
<|reserved_special_token_0|>
os.mkdir(validation_cats_dir)
<|reserved_special_token_0|>
os.mkdir(validation_dogs_dir)
<|reserved_special_token_0|>
os.mkdir(test_cats_dir)
<|reserved_special_token_0|>
os.mkdir(test_dogs_dir)
<|reserved_special_token_0|>
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
<|reserved_special_token_0|>
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
<|reserved_special_token_0|>
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
<|reserved_special_token_0|>
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
<|reserved_special_token_0|>
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
<|reserved_special_token_0|>
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
<|reserved_special_token_0|>
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150,
150, 3)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
<|reserved_special_token_0|>
model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=
0.0001), metrics=['acc'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
original_dataset_dir = 'E:/train/'
base_dir = 'E:/train/smaller'
os.mkdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
<|reserved_special_token_0|>
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150,
150, 3)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
<|reserved_special_token_0|>
model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=
0.0001), metrics=['acc'])
<|reserved_special_token_0|>
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=
(150, 150), batch_size=20, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir,
target_size=(150, 150), batch_size=20, class_mode='binary')
<|reserved_special_token_1|>
import os, shutil
original_dataset_dir = 'E:/train/'
base_dir = 'E:/train/smaller'
os.mkdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150,
150, 3)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
from keras import optimizers
model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=
0.0001), metrics=['acc'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=
(150, 150), batch_size=20, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir,
target_size=(150, 150), batch_size=20, class_mode='binary')
<|reserved_special_token_1|>
# 5.2 Training a convnet from scratch on a "small dataset" (p.131)
# Preprocessing (p.133)
# Copying images to train, validation and test directories
import os, shutil
# The path to the directory where the original dataset was uncompressed
original_dataset_dir = 'E:/train/'
# The directory where we will store our smaller dataset
base_dir = 'E:/train/smaller'
os.mkdir(base_dir)
# Directories for our training, validation, and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)
# Directory with our test cat pictures
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
# Directory with our test dog pictures
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
# Copy first 1000 cat images to train_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 cat images to validation_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 cat images to test_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy first 1000 dog images to train_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 dog images to validation_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 dog images to test_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
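# A quick sanity check on the split (a minimal sketch; the expected counts
# follow from the copy ranges above: 1000 train / 500 validation / 500 test
# per class):
print('total training cat images:', len(os.listdir(train_cats_dir)))
print('total training dog images:', len(os.listdir(train_dogs_dir)))
print('total validation cat images:', len(os.listdir(validation_cats_dir)))
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
print('total test cat images:', len(os.listdir(test_cats_dir)))
print('total test dog images:', len(os.listdir(test_dogs_dir)))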
# 5.2.3 Building our network
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
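# Optionally inspect the resulting architecture before compiling:
model.summary()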
from keras import optimizers
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
# 5.2.4 Data preprocessing (p.137)
# 1) Read the picture files.
# 2) Decode the JPEG content to RGB grids of pixels.
# 3) Convert these into floating point tensors.
# 4) Rescale the pixel values (between 0 and 255) to the [0, 1] interval
# (as you know, neural networks prefer to deal with small input values).
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(train_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
# Listing 5.16 Fitting our model using a batch generator (p.138)
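# A minimal sketch of that fitting step, assuming the Keras 2.x generator API
# used throughout this script; the step counts match the split above
# (100 x 20 = 2000 training images, 50 x 20 = 1000 validation images).
history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator,
                              validation_steps=50)
# A hypothetical filename for persisting the trained model:
model.save('cats_and_dogs_small_1.h5')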
|
flexible
|
{
"blob_id": "8340872f03c1bf7c1aee0c437258ac8e44e08bb8",
"index": 7313,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.mkdir(base_dir)\n<mask token>\nos.mkdir(train_dir)\n<mask token>\nos.mkdir(validation_dir)\n<mask token>\nos.mkdir(test_dir)\n<mask token>\nos.mkdir(train_cats_dir)\n<mask token>\nos.mkdir(train_dogs_dir)\n<mask token>\nos.mkdir(validation_cats_dir)\n<mask token>\nos.mkdir(validation_dogs_dir)\n<mask token>\nos.mkdir(test_cats_dir)\n<mask token>\nos.mkdir(test_dogs_dir)\n<mask token>\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_cats_dir, fname)\n shutil.copyfile(src, dst)\n<mask token>\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_cats_dir, fname)\n shutil.copyfile(src, dst)\n<mask token>\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_cats_dir, fname)\n shutil.copyfile(src, dst)\n<mask token>\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_dogs_dir, fname)\n shutil.copyfile(src, dst)\n<mask token>\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_dogs_dir, fname)\n shutil.copyfile(src, dst)\n<mask token>\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_dogs_dir, fname)\n shutil.copyfile(src, dst)\n<mask token>\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, \n 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n<mask token>\nmodel.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=\n 0.0001), metrics=['acc'])\n<mask token>\n",
"step-3": "<mask token>\noriginal_dataset_dir = 'E:/train/'\nbase_dir = 'E:/train/smaller'\nos.mkdir(base_dir)\ntrain_dir = os.path.join(base_dir, 'train')\nos.mkdir(train_dir)\nvalidation_dir = os.path.join(base_dir, 'validation')\nos.mkdir(validation_dir)\ntest_dir = os.path.join(base_dir, 'test')\nos.mkdir(test_dir)\ntrain_cats_dir = os.path.join(train_dir, 'cats')\nos.mkdir(train_cats_dir)\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\nos.mkdir(train_dogs_dir)\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nos.mkdir(validation_cats_dir)\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\nos.mkdir(validation_dogs_dir)\ntest_cats_dir = os.path.join(test_dir, 'cats')\nos.mkdir(test_cats_dir)\ntest_dogs_dir = os.path.join(test_dir, 'dogs')\nos.mkdir(test_dogs_dir)\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_cats_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_cats_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_cats_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_dogs_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_dogs_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_dogs_dir, fname)\n shutil.copyfile(src, dst)\n<mask token>\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, \n 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n<mask token>\nmodel.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=\n 0.0001), metrics=['acc'])\n<mask token>\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntest_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntrain_generator = train_datagen.flow_from_directory(train_dir, target_size=\n (150, 150), batch_size=20, class_mode='binary')\nvalidation_generator = test_datagen.flow_from_directory(validation_dir,\n target_size=(150, 150), batch_size=20, class_mode='binary')\n",
"step-4": "import os, shutil\noriginal_dataset_dir = 'E:/train/'\nbase_dir = 'E:/train/smaller'\nos.mkdir(base_dir)\ntrain_dir = os.path.join(base_dir, 'train')\nos.mkdir(train_dir)\nvalidation_dir = os.path.join(base_dir, 'validation')\nos.mkdir(validation_dir)\ntest_dir = os.path.join(base_dir, 'test')\nos.mkdir(test_dir)\ntrain_cats_dir = os.path.join(train_dir, 'cats')\nos.mkdir(train_cats_dir)\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\nos.mkdir(train_dogs_dir)\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nos.mkdir(validation_cats_dir)\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\nos.mkdir(validation_dogs_dir)\ntest_cats_dir = os.path.join(test_dir, 'cats')\nos.mkdir(test_cats_dir)\ntest_dogs_dir = os.path.join(test_dir, 'dogs')\nos.mkdir(test_dogs_dir)\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_cats_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_cats_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_cats_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_dogs_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_dogs_dir, fname)\n shutil.copyfile(src, dst)\nfnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_dogs_dir, fname)\n shutil.copyfile(src, dst)\nfrom keras import layers\nfrom keras import models\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, \n 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\nfrom keras import optimizers\nmodel.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=\n 0.0001), metrics=['acc'])\nfrom keras.preprocessing.image import ImageDataGenerator\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntest_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntrain_generator = train_datagen.flow_from_directory(train_dir, target_size=\n (150, 150), batch_size=20, class_mode='binary')\nvalidation_generator = test_datagen.flow_from_directory(validation_dir,\n target_size=(150, 150), batch_size=20, class_mode='binary')\n",
"step-5": "# 5.2 Training a convnet from scratch on a \"small dataset\" (p.131)\n# Preprocessing (p.133)\n# Copying images to train, validation and test directories\nimport os, shutil\n\n# The path to the directory where the original dataset was uncompressed\noriginal_dataset_dir = 'E:/train/'\n\n# The directory where we will store our smaller dataset\nbase_dir = 'E:/train/smaller'\nos.mkdir(base_dir)\n\n# Directories for our training, validation, and test splits\ntrain_dir = os.path.join(base_dir, 'train')\nos.mkdir(train_dir)\nvalidation_dir = os.path.join(base_dir, 'validation')\nos.mkdir(validation_dir)\ntest_dir = os.path.join(base_dir, 'test')\nos.mkdir(test_dir)\n\n# Directory with our training cat pictures\ntrain_cats_dir = os.path.join(train_dir, 'cats')\nos.mkdir(train_cats_dir)\n\n# Directory with our training dog pictures\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\nos.mkdir(train_dogs_dir)\n\n# Directory with our validation cat pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nos.mkdir(validation_cats_dir)\n\n# Directory with our validation dog pictures\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\nos.mkdir(validation_dogs_dir)\n\n# Directory with our validation cat pictures\ntest_cats_dir = os.path.join(test_dir, 'cats')\nos.mkdir(test_cats_dir)\n\n# Directory with our validation dog pictures\ntest_dogs_dir = os.path.join(test_dir, 'dogs')\nos.mkdir(test_dogs_dir)\n\n# Copy first 1000 cat images to train_cats_dir\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n# Copy next 500 cat images to validation_cats_dir\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n# Copy next 500 cat images to test_cats_dir\nfnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n# Copy first 1000 dog images to train_dogs_dir\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n# Copy next 500 dog images to validation_dogs_dir\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n# Copy next 500 dog images to test_dogs_dir\nfnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]\nfor fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n\n# 5.2.3 Building our network\nfrom keras import layers\nfrom keras import models\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, 
activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nfrom keras import optimizers\nmodel.compile(loss='binary_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n\n# 5.2.4 Data preprocessing (p.137)\n# 1) Read the picture files.\n# 2) Decode the JPEG content to RBG grids of pixels.\n# 3) Convert these into floating point tensors.\n# 4) Rescale the pixel values (between 0 and 255) to the [0, 1] interval\n# (as you know, neural networks prefer to deal with small input values).\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntest_datagen = ImageDataGenerator(rescale=1.0 / 255)\n\ntrain_generator = train_datagen.flow_from_directory(train_dir,\n target_size=(150, 150),\n batch_size=20,\n class_mode='binary')\nvalidation_generator = test_datagen.flow_from_directory(validation_dir,\n target_size=(150, 150),\n batch_size=20,\n class_mode='binary')\n\n\n# Listing 5.16 Fitting our model using a batch generator (p.138)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
class Login(models.Model):
    trinity_id = models.CharField('', max_length=200)
    trinity_password = models.CharField('', max_length=500)
objects = models.Manager()
|
normal
|
{
"blob_id": "1c5cb9363c2903905f1026ede77615e8373c250b",
"index": 7321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Login(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Login(models.Model):\n trinity_id = models.CharField('', max_length=200)\n trinity_password = models.CharField('', max_length=500)\n objects = models.Manager()\n",
"step-4": "from django.db import models\n\n\nclass Login(models.Model):\n trinity_id = models.CharField('', max_length=200)\n trinity_password = models.CharField('', max_length=500)\n objects = models.Manager()\n",
"step-5": "from django.db import models\n\n# Create your models here.\n\nclass Login(models.Model):\n trinity_id = models.CharField('',max_length=200)\n trinity_password = models.CharField('',max_length=500)\n\n objects = models.Manager()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution(object):
def moveZeroes(self, nums):
"""
给定一个数组 nums,编写一个函数将所有 0 移动到数组的末尾,同时保持非零元素的相对顺序。
---
输入: [0,1,0,3,12]
输出: [1,3,12,0,0]
---
思路;
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
num = nums.count(0)
while 0 in nums:
nums.remove(0)
for i in range(num):
nums.append(0)
def moveZeroes1(self, nums):
n = len(nums)
i = 0
j = 0
while i < n:
if nums[i] != 0:
nums[j], nums[i] = nums[i], nums[j]
j += 1
i += 1
|
normal
|
{
"blob_id": "ece80a7765674f9d2991029bb86486b616a90f58",
"index": 3944,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def moveZeroes(self, nums):\n \"\"\"\n\t\t给定一个数组 nums,编写一个函数将所有 0 移动到数组的末尾,同时保持非零元素的相对顺序。\n\t\t---\n\t\t输入: [0,1,0,3,12]\n\t\t输出: [1,3,12,0,0]\n\t\t---\n\t\t思路;\n\n\t\t:type nums: List[int]\n\t\t:rtype: void Do not return anything, modify nums in-place instead.\n\t\t\"\"\"\n num = nums.count(0)\n while 0 in nums:\n nums.remove(0)\n for i in range(num):\n nums.append(0)\n\n def moveZeroes1(self, nums):\n n = len(nums)\n i = 0\n j = 0\n while i < n:\n if nums[i] != 0:\n nums[j], nums[i] = nums[i], nums[j]\n j += 1\n i += 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from mtots.parser import base
from mtots.parser import combinator
from mtots.parser.combinator import All
from mtots.parser.combinator import Any
from mtots.parser.combinator import AnyTokenBut
from mtots.parser.combinator import Forward
from mtots.parser.combinator import Peek
from mtots.parser.combinator import Required
from mtots.parser.combinator import Token
|
flexible
|
{
"blob_id": "f9edbef46494cc2993c6a633fe35406524dbbf67",
"index": 1199,
"step-1": "<mask token>\n",
"step-2": "from mtots.parser import base\nfrom mtots.parser import combinator\nfrom mtots.parser.combinator import All\nfrom mtots.parser.combinator import Any\nfrom mtots.parser.combinator import AnyTokenBut\nfrom mtots.parser.combinator import Forward\nfrom mtots.parser.combinator import Peek\nfrom mtots.parser.combinator import Required\nfrom mtots.parser.combinator import Token\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import numpy as np
mydict = {}
mylist0 = np.array([1, 2, 3, 4, 5])
mylist1 = np.array([2, 3, 4, 5, 6])
print(mydict)
print(mylist0)
print(mylist1)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist0
else:
mydict[c] = mylist0
print(mydict)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist1
else:
mydict[c] = mylist1
print(mydict)
|
normal
|
{
"blob_id": "6e5b8be6182f39f185f4547f0abd84a4e404bf34",
"index": 1861,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n",
"step-3": "<mask token>\nmydict = {}\nmylist0 = np.array([1, 2, 3, 4, 5])\nmylist1 = np.array([2, 3, 4, 5, 6])\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n",
"step-4": "import numpy as np\nmydict = {}\nmylist0 = np.array([1, 2, 3, 4, 5])\nmylist1 = np.array([2, 3, 4, 5, 6])\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MfccLocal(Mfcc):
<|reserved_special_token_0|>
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger()
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
<|reserved_special_token_1|>
import json
import logging
import numpy as np
from python_speech_features import mfcc
from format_converters import get_segment
from schemas import *
from chains.mfcc import Mfcc
logger = logging.getLogger()
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
<|reserved_special_token_1|>
import json
import logging
import numpy as np
from python_speech_features import mfcc
from format_converters import get_segment
from schemas import *
from chains.mfcc import Mfcc
logger = logging.getLogger()
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get("phoneme_len", 2048)
ignore_shorter_phonemes = self.process_settings.get("ignore_shorter_phonemes", True)
mfcc_nfft = self.process_settings.get("mfcc_nfft", 2048)
mfcc_winstep = self.process_settings.get("mfcc_winstep", 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result['segment_info']
if info['word'] not in self.blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = (1000 * info['start'], 1000 * info['end'])
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency,
nfft=mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
|
flexible
|
{
"blob_id": "44214492dd7283da4b9a77bd2a1fa9d9c0643ff2",
"index": 1188,
"step-1": "<mask token>\n\n\nclass MfccLocal(Mfcc):\n <mask token>\n abstract_class = False\n\n @staticmethod\n def sample_result_filename(out_sample_path):\n return f'{out_sample_path[:-5]}_mfcc_result.json'\n\n @staticmethod\n def filenames_to_skip_sample(out_sample_path):\n return [f'{out_sample_path[:-5]}_mfcc_result.csv']\n\n @staticmethod\n def serialize_to_json(mfcc_result):\n \"\"\"\n :param mfcc_result: list of mfcc measurements with\n necessary metadata\n :return: serialized object of proper schema\n \"\"\"\n mfcc_schema = MfccLocalSchema()\n mfcc_dict = {'mfcc_info': mfcc_result}\n return mfcc_schema.dumps(mfcc_dict)\n\n def compute_mfcc(self, segments_path, phonemes_result_path):\n \"\"\"\n\n :param segments_path: path to the input wav\n :param phonemes_result_path: path to phonemes results\n that is required by the Local version of the Mfcc\n :return: computed list of mfcc features with all required metadata\n \"\"\"\n wav = get_segment(segments_path, 'wav')\n frequency = wav.frame_rate\n phoneme_len = self.process_settings.get('phoneme_len', 2048)\n ignore_shorter_phonemes = self.process_settings.get(\n 'ignore_shorter_phonemes', True)\n mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)\n mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)\n with open(phonemes_result_path, 'r') as f:\n schema = DecoderOutputSchema()\n json_file = json.load(f)\n phonemes_result = schema.load(json_file)\n phonemes_info = [info for info in phonemes_result[\n 'segment_info'] if info['word'] not in self.\n blacklisted_phonemes]\n mfcc_result = []\n for info in phonemes_info:\n start, stop = 1000 * info['start'], 1000 * info['end']\n segment = np.array(wav[start:stop].get_array_of_samples())\n if ignore_shorter_phonemes and segment.size < phoneme_len:\n continue\n mfcc_features = mfcc(segment, samplerate=frequency, nfft=\n mfcc_nfft, winstep=mfcc_winstep)\n for i in range(len(mfcc_features)):\n ith_mfcc = np.array(mfcc_features[i, :])\n ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),\n 'mfcc': ith_mfcc, **info}\n mfcc_result.append(ith_mfcc_result_row)\n return mfcc_result\n",
"step-2": "<mask token>\n\n\nclass MfccLocal(Mfcc):\n \"\"\"\n MfccLocal computes Mfcc features for each phoneme from the sample\n that are not blacklisted based on phoneme label that is\n received from Phoneme chain.\n\n It subclasses Formants to not repeat the sample_layer logic\n which is valid also in this context\n \"\"\"\n abstract_class = False\n\n @staticmethod\n def sample_result_filename(out_sample_path):\n return f'{out_sample_path[:-5]}_mfcc_result.json'\n\n @staticmethod\n def filenames_to_skip_sample(out_sample_path):\n return [f'{out_sample_path[:-5]}_mfcc_result.csv']\n\n @staticmethod\n def serialize_to_json(mfcc_result):\n \"\"\"\n :param mfcc_result: list of mfcc measurements with\n necessary metadata\n :return: serialized object of proper schema\n \"\"\"\n mfcc_schema = MfccLocalSchema()\n mfcc_dict = {'mfcc_info': mfcc_result}\n return mfcc_schema.dumps(mfcc_dict)\n\n def compute_mfcc(self, segments_path, phonemes_result_path):\n \"\"\"\n\n :param segments_path: path to the input wav\n :param phonemes_result_path: path to phonemes results\n that is required by the Local version of the Mfcc\n :return: computed list of mfcc features with all required metadata\n \"\"\"\n wav = get_segment(segments_path, 'wav')\n frequency = wav.frame_rate\n phoneme_len = self.process_settings.get('phoneme_len', 2048)\n ignore_shorter_phonemes = self.process_settings.get(\n 'ignore_shorter_phonemes', True)\n mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)\n mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)\n with open(phonemes_result_path, 'r') as f:\n schema = DecoderOutputSchema()\n json_file = json.load(f)\n phonemes_result = schema.load(json_file)\n phonemes_info = [info for info in phonemes_result[\n 'segment_info'] if info['word'] not in self.\n blacklisted_phonemes]\n mfcc_result = []\n for info in phonemes_info:\n start, stop = 1000 * info['start'], 1000 * info['end']\n segment = np.array(wav[start:stop].get_array_of_samples())\n if ignore_shorter_phonemes and segment.size < phoneme_len:\n continue\n mfcc_features = mfcc(segment, samplerate=frequency, nfft=\n mfcc_nfft, winstep=mfcc_winstep)\n for i in range(len(mfcc_features)):\n ith_mfcc = np.array(mfcc_features[i, :])\n ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),\n 'mfcc': ith_mfcc, **info}\n mfcc_result.append(ith_mfcc_result_row)\n return mfcc_result\n",
"step-3": "<mask token>\nlogger = logging.getLogger()\n\n\nclass MfccLocal(Mfcc):\n \"\"\"\n MfccLocal computes Mfcc features for each phoneme from the sample\n that are not blacklisted based on phoneme label that is\n received from Phoneme chain.\n\n It subclasses Formants to not repeat the sample_layer logic\n which is valid also in this context\n \"\"\"\n abstract_class = False\n\n @staticmethod\n def sample_result_filename(out_sample_path):\n return f'{out_sample_path[:-5]}_mfcc_result.json'\n\n @staticmethod\n def filenames_to_skip_sample(out_sample_path):\n return [f'{out_sample_path[:-5]}_mfcc_result.csv']\n\n @staticmethod\n def serialize_to_json(mfcc_result):\n \"\"\"\n :param mfcc_result: list of mfcc measurements with\n necessary metadata\n :return: serialized object of proper schema\n \"\"\"\n mfcc_schema = MfccLocalSchema()\n mfcc_dict = {'mfcc_info': mfcc_result}\n return mfcc_schema.dumps(mfcc_dict)\n\n def compute_mfcc(self, segments_path, phonemes_result_path):\n \"\"\"\n\n :param segments_path: path to the input wav\n :param phonemes_result_path: path to phonemes results\n that is required by the Local version of the Mfcc\n :return: computed list of mfcc features with all required metadata\n \"\"\"\n wav = get_segment(segments_path, 'wav')\n frequency = wav.frame_rate\n phoneme_len = self.process_settings.get('phoneme_len', 2048)\n ignore_shorter_phonemes = self.process_settings.get(\n 'ignore_shorter_phonemes', True)\n mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)\n mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)\n with open(phonemes_result_path, 'r') as f:\n schema = DecoderOutputSchema()\n json_file = json.load(f)\n phonemes_result = schema.load(json_file)\n phonemes_info = [info for info in phonemes_result[\n 'segment_info'] if info['word'] not in self.\n blacklisted_phonemes]\n mfcc_result = []\n for info in phonemes_info:\n start, stop = 1000 * info['start'], 1000 * info['end']\n segment = np.array(wav[start:stop].get_array_of_samples())\n if ignore_shorter_phonemes and segment.size < phoneme_len:\n continue\n mfcc_features = mfcc(segment, samplerate=frequency, nfft=\n mfcc_nfft, winstep=mfcc_winstep)\n for i in range(len(mfcc_features)):\n ith_mfcc = np.array(mfcc_features[i, :])\n ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),\n 'mfcc': ith_mfcc, **info}\n mfcc_result.append(ith_mfcc_result_row)\n return mfcc_result\n",
"step-4": "import json\nimport logging\nimport numpy as np\nfrom python_speech_features import mfcc\nfrom format_converters import get_segment\nfrom schemas import *\nfrom chains.mfcc import Mfcc\nlogger = logging.getLogger()\n\n\nclass MfccLocal(Mfcc):\n \"\"\"\n MfccLocal computes Mfcc features for each phoneme from the sample\n that are not blacklisted based on phoneme label that is\n received from Phoneme chain.\n\n It subclasses Formants to not repeat the sample_layer logic\n which is valid also in this context\n \"\"\"\n abstract_class = False\n\n @staticmethod\n def sample_result_filename(out_sample_path):\n return f'{out_sample_path[:-5]}_mfcc_result.json'\n\n @staticmethod\n def filenames_to_skip_sample(out_sample_path):\n return [f'{out_sample_path[:-5]}_mfcc_result.csv']\n\n @staticmethod\n def serialize_to_json(mfcc_result):\n \"\"\"\n :param mfcc_result: list of mfcc measurements with\n necessary metadata\n :return: serialized object of proper schema\n \"\"\"\n mfcc_schema = MfccLocalSchema()\n mfcc_dict = {'mfcc_info': mfcc_result}\n return mfcc_schema.dumps(mfcc_dict)\n\n def compute_mfcc(self, segments_path, phonemes_result_path):\n \"\"\"\n\n :param segments_path: path to the input wav\n :param phonemes_result_path: path to phonemes results\n that is required by the Local version of the Mfcc\n :return: computed list of mfcc features with all required metadata\n \"\"\"\n wav = get_segment(segments_path, 'wav')\n frequency = wav.frame_rate\n phoneme_len = self.process_settings.get('phoneme_len', 2048)\n ignore_shorter_phonemes = self.process_settings.get(\n 'ignore_shorter_phonemes', True)\n mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)\n mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)\n with open(phonemes_result_path, 'r') as f:\n schema = DecoderOutputSchema()\n json_file = json.load(f)\n phonemes_result = schema.load(json_file)\n phonemes_info = [info for info in phonemes_result[\n 'segment_info'] if info['word'] not in self.\n blacklisted_phonemes]\n mfcc_result = []\n for info in phonemes_info:\n start, stop = 1000 * info['start'], 1000 * info['end']\n segment = np.array(wav[start:stop].get_array_of_samples())\n if ignore_shorter_phonemes and segment.size < phoneme_len:\n continue\n mfcc_features = mfcc(segment, samplerate=frequency, nfft=\n mfcc_nfft, winstep=mfcc_winstep)\n for i in range(len(mfcc_features)):\n ith_mfcc = np.array(mfcc_features[i, :])\n ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),\n 'mfcc': ith_mfcc, **info}\n mfcc_result.append(ith_mfcc_result_row)\n return mfcc_result\n",
"step-5": "import json\nimport logging\n\nimport numpy as np\nfrom python_speech_features import mfcc\n\nfrom format_converters import get_segment\nfrom schemas import *\nfrom chains.mfcc import Mfcc\nlogger = logging.getLogger()\n\n\nclass MfccLocal(Mfcc):\n \"\"\"\n MfccLocal computes Mfcc features for each phoneme from the sample\n that are not blacklisted based on phoneme label that is\n received from Phoneme chain.\n\n It subclasses Formants to not repeat the sample_layer logic\n which is valid also in this context\n \"\"\"\n\n abstract_class = False\n\n @staticmethod\n def sample_result_filename(out_sample_path):\n return f'{out_sample_path[:-5]}_mfcc_result.json'\n\n @staticmethod\n def filenames_to_skip_sample(out_sample_path):\n return [f'{out_sample_path[:-5]}_mfcc_result.csv']\n\n @staticmethod\n def serialize_to_json(mfcc_result):\n \"\"\"\n :param mfcc_result: list of mfcc measurements with\n necessary metadata\n :return: serialized object of proper schema\n \"\"\"\n mfcc_schema = MfccLocalSchema()\n mfcc_dict = {'mfcc_info': mfcc_result}\n return mfcc_schema.dumps(mfcc_dict)\n\n def compute_mfcc(self, segments_path, phonemes_result_path):\n \"\"\"\n\n :param segments_path: path to the input wav\n :param phonemes_result_path: path to phonemes results\n that is required by the Local version of the Mfcc\n :return: computed list of mfcc features with all required metadata\n \"\"\"\n wav = get_segment(segments_path, 'wav')\n frequency = wav.frame_rate\n phoneme_len = self.process_settings.get(\"phoneme_len\", 2048)\n ignore_shorter_phonemes = self.process_settings.get(\"ignore_shorter_phonemes\", True)\n mfcc_nfft = self.process_settings.get(\"mfcc_nfft\", 2048)\n mfcc_winstep = self.process_settings.get(\"mfcc_winstep\", 0.1)\n\n with open(phonemes_result_path, 'r') as f:\n schema = DecoderOutputSchema()\n json_file = json.load(f)\n phonemes_result = schema.load(json_file)\n phonemes_info = [info for info in phonemes_result['segment_info']\n if info['word'] not in self.blacklisted_phonemes]\n\n mfcc_result = []\n for info in phonemes_info:\n start, stop = (1000 * info['start'], 1000 * info['end'])\n segment = np.array(wav[start:stop].get_array_of_samples())\n if ignore_shorter_phonemes and segment.size < phoneme_len:\n continue\n mfcc_features = mfcc(segment, samplerate=frequency,\n nfft=mfcc_nfft, winstep=mfcc_winstep)\n for i in range(len(mfcc_features)):\n ith_mfcc = np.array(mfcc_features[i, :])\n ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),\n 'mfcc': ith_mfcc, **info}\n mfcc_result.append(ith_mfcc_result_row)\n return mfcc_result",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
island_count = 0
matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]
from collections import deque
queue = deque()
"""
从 (0,0) 开始,对每个格子尝试一次 BFS 操作
"""
for i in range(m):
for j in range(n):
if grid[i][j] == '1' and not matrix_visited[i][j]:
island_count += 1
matrix_visited[i][j] = True
queue.append((i, j))
while queue:
x, y = queue.popleft()
for direction in self.directions:
new_i = x + direction[0]
new_j = y + direction[1]
"""
标记该格子已被访问,并且入队列的条件:
1.横坐标在网格内
2.纵坐标在网格内
3.该格子没有被遍历过
4.该格子是陆地
"""
if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and
not matrix_visited[new_i][new_j] and grid[
new_i][new_j] == '1'):
matrix_visited[new_i][new_j] = True
queue.append((new_i, new_j))
pass
pass
pass
pass
pass
return island_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Solution(object):
"""
x-1,y
x,y-1 x,y x,y+1
x+1,y
"""
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
island_count = 0
matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]
from collections import deque
queue = deque()
"""
从 (0,0) 开始,对每个格子尝试一次 BFS 操作
"""
for i in range(m):
for j in range(n):
if grid[i][j] == '1' and not matrix_visited[i][j]:
island_count += 1
matrix_visited[i][j] = True
queue.append((i, j))
while queue:
x, y = queue.popleft()
for direction in self.directions:
new_i = x + direction[0]
new_j = y + direction[1]
"""
标记该格子已被访问,并且入队列的条件:
1.横坐标在网格内
2.纵坐标在网格内
3.该格子没有被遍历过
4.该格子是陆地
"""
if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and
not matrix_visited[new_i][new_j] and grid[
new_i][new_j] == '1'):
matrix_visited[new_i][new_j] = True
queue.append((new_i, new_j))
pass
pass
pass
pass
pass
return island_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def dfs(self, matrix, i, j, m, n, visited):
"""
深度优先遍历
"""
visited[i][j] = True
for direction in self.directions:
new_i = i + direction[0]
new_j = j + direction[1]
"""
对下一个格子,执行 DFS 的条件:
1.横坐标在网格内
2.纵坐标在网格内
3.该格子没有被遍历过
4.该格子是陆地
"""
if 0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and not visited[
new_i][new_j] and matrix[new_i][new_j] == '1':
self.dfs(matrix, new_i, new_j, m, n, visited)
pass
pass
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
island_count = 0
matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]
"""
从 (0,0) 开始,对每个格子尝试一次 DFS 操作
"""
for i in range(m):
for j in range(n):
if grid[i][j] == '1' and not matrix_visited[i][j]:
self.dfs(grid, i, j, m, n, matrix_visited)
island_count += 1
pass
pass
pass
return island_count
<|reserved_special_token_0|>
class Solution(object):
"""
x-1,y
x,y-1 x,y x,y+1
x+1,y
"""
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
island_count = 0
matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]
from collections import deque
queue = deque()
"""
从 (0,0) 开始,对每个格子尝试一次 BFS 操作
"""
for i in range(m):
for j in range(n):
if grid[i][j] == '1' and not matrix_visited[i][j]:
island_count += 1
matrix_visited[i][j] = True
queue.append((i, j))
while queue:
x, y = queue.popleft()
for direction in self.directions:
new_i = x + direction[0]
new_j = y + direction[1]
"""
标记该格子已被访问,并且入队列的条件:
1.横坐标在网格内
2.纵坐标在网格内
3.该格子没有被遍历过
4.该格子是陆地
"""
if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and
not matrix_visited[new_i][new_j] and grid[
new_i][new_j] == '1'):
matrix_visited[new_i][new_j] = True
queue.append((new_i, new_j))
pass
pass
pass
pass
pass
return island_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
"""
x-1,y
x,y-1 x,y x,y+1
x+1,y
"""
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def dfs(self, matrix, i, j, m, n, visited):
"""
深度优先遍历
"""
visited[i][j] = True
for direction in self.directions:
new_i = i + direction[0]
new_j = j + direction[1]
"""
对下一个格子,执行 DFS 的条件:
1.横坐标在网格内
2.纵坐标在网格内
3.该格子没有被遍历过
4.该格子是陆地
"""
if 0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and not visited[
new_i][new_j] and matrix[new_i][new_j] == '1':
self.dfs(matrix, new_i, new_j, m, n, visited)
pass
pass
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
island_count = 0
matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]
"""
从 (0,0) 开始,对每个格子尝试一次 DFS 操作
"""
for i in range(m):
for j in range(n):
if grid[i][j] == '1' and not matrix_visited[i][j]:
self.dfs(grid, i, j, m, n, matrix_visited)
island_count += 1
pass
pass
pass
return island_count
<|reserved_special_token_0|>
class Solution(object):
"""
x-1,y
x,y-1 x,y x,y+1
x+1,y
"""
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
island_count = 0
matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]
from collections import deque
queue = deque()
"""
        Starting from (0,0), try one BFS from every cell
"""
for i in range(m):
for j in range(n):
if grid[i][j] == '1' and not matrix_visited[i][j]:
island_count += 1
matrix_visited[i][j] = True
queue.append((i, j))
while queue:
x, y = queue.popleft()
for direction in self.directions:
new_i = x + direction[0]
new_j = y + direction[1]
"""
                            Conditions for marking a cell visited and enqueuing it:
                            1. the row index is inside the grid
                            2. the column index is inside the grid
                            3. the cell has not been visited yet
                            4. the cell is land
"""
if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and
not matrix_visited[new_i][new_j] and grid[
new_i][new_j] == '1'):
matrix_visited[new_i][new_j] = True
queue.append((new_i, new_j))
pass
pass
pass
pass
pass
return island_count
<|reserved_special_token_0|>
print(result)
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding:utf-8
"""
200. 岛屿数量
难度
中等
给定一个由 '1'(陆地)和 '0'(水)组成的的二维网格,计算岛屿的数量。一个岛被水包围,并且它是通过水平方向或垂直方向上相邻的陆地连接而成的。你可以假设网格的四个边均被水包围。
示例 1:
输入:
11110
11010
11000
00000
输出: 1
示例 2:
输入:
11000
11000
00100
00011
输出: 3
"""
# ================================================================================
"""
洪水填充算法(Flood Fill Algorithm)
"""
"""
Flood Fill 算法
是从一个区域中提取若干个连通的点与其他相邻区域区分开(或分别染成不同颜色)的经典算法。
因为其思路类似洪水从一个区域扩散到所有能到达的区域而得名。
从一个点扩散开,找到与其连通的点,这不是什么高深的算法,
其实就是从一个点开始,进行一次“深度优先遍历”或者“广度优先遍历”,
通过“深度优先遍历”或者“广度优先遍历”发现一片连着的区域,
对于这道题来说,
就是从一个是“陆地”的格子开始进行一次“深度优先遍历”或者“广度优先遍历”,
把与之相连的所有的格子都标记上,视为发现了一个“岛屿”。
说明:
那么每一次进行“深度优先遍历”或者“广度优先遍历”的条件就是:
1、这个格子是陆地(“1”),如果是水域(“0”)就无从谈论“岛屿”;
2、这个格子不能是之前发现“岛屿”的过程中执行了“深度优先遍历”或者“广度优先遍历”操作,而被标记的格子。
"""
# ================================================================================
"""
思路:
DFS(深度优先遍历)
(回溯)
时间复杂度:
O()
空间复杂度:
O()
"""
class Solution(object):
    # Direction array: row/column offsets of the 4 neighbors relative to the current cell
"""
x-1,y
x,y-1 x,y x,y+1
x+1,y
"""
    # The order of the 4 directions does not matter
    # Order used here: up, right, down, left
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def dfs(self, matrix, i, j, m, n, visited):
"""
        Depth-first search
"""
visited[i][j] = True
        # print('(%s,%s)' % (i, j))
for direction in self.directions:
new_i = i + direction[0]
new_j = j + direction[1]
"""
            Conditions for recursing into the next cell:
            1. the row index is inside the grid
            2. the column index is inside the grid
            3. the cell has not been visited yet
            4. the cell is land
"""
if 0<=new_i<=m-1 \
and 0<=new_j<=n-1 \
and not visited[new_i][new_j] \
and matrix[new_i][new_j] == '1':
self.dfs(matrix, new_i, new_j, m, n, visited)
pass
pass
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
        # island counter
        island_count = 0
        # matrix recording which cells have been visited
        matrix_visited = [[False for _ in range(n)] for _ in range(m)]
"""
        Starting from (0,0), try one DFS from every cell
"""
for i in range(m):
for j in range(n):
                # Any unvisited land cell starts a DFS that finds and marks all land connected to it
if grid[i][j] == '1' and not matrix_visited[i][j]:
self.dfs(grid, i, j, m, n, matrix_visited)
                    # island count +1
                    island_count += 1
                    # print('island_count:', island_count)
pass
pass
pass
return island_count
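# Added note: the recursive DFS above makes one call per connected land cell, so
# a large all-land grid can exceed CPython's default recursion limit (~1000
# frames). A common workaround (hypothetical here, not in the original) before
# calling numIslands:
# import sys
# sys.setrecursionlimit(10 ** 6)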
# ================================================================================
"""
思路:
BFS(广度优先遍历)
(需要一个辅助队列)
时间复杂度:
O()
空间复杂度:
O()
"""
class Solution(object):
    # Direction array: row/column offsets of the 4 neighbors relative to the current cell
"""
x-1,y
x,y-1 x,y x,y+1
x+1,y
"""
    # The order of the 4 directions does not matter
    # Order used here: up, right, down, left
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
        # island counter
        island_count = 0
        # matrix recording which cells have been visited
        matrix_visited = [[False for _ in range(n)] for _ in range(m)]
        # auxiliary queue
        from collections import deque
queue = deque()
"""
        Starting from (0,0), try one BFS from every cell
"""
for i in range(m):
for j in range(n):
                # Any unvisited land cell starts a BFS that finds and marks all land connected to it
                if grid[i][j] == '1' and not matrix_visited[i][j]:
                    # ------------------------------
                    # island count +1
                    island_count += 1
                    # print('island_count: ', island_count)
                    matrix_visited[i][j] = True
                    # print('(%s,%s)' % (i, j))
                    queue.append((i, j))
# ------------------------------
while queue:
x, y = queue.popleft()
                        # check the 4 neighbors in turn
for direction in self.directions:
new_i = x + direction[0]
new_j = y + direction[1]
"""
                            Conditions for marking a cell visited and enqueuing it:
                            1. the row index is inside the grid
                            2. the column index is inside the grid
                            3. the cell has not been visited yet
                            4. the cell is land
"""
if 0 <= new_i <= m - 1 \
and 0 <= new_j <= n - 1 \
and not matrix_visited[new_i][new_j] \
and grid[new_i][new_j] == '1':
                                # mark as visited
                                matrix_visited[new_i][new_j] = True
                                # print('(%s,%s)' % (new_i, new_j))
                                # enqueue
                                queue.append((new_i, new_j))
pass
pass
# ------------------------------
pass
pass
pass
return island_count
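# Added note: collections.deque gives O(1) popleft(); a plain list with
# list.pop(0) would make each dequeue O(n) and the whole BFS roughly quadratic
# in the number of land cells.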
# ================================================================================
# ================================================================================
# ================================================================================
# ================================================================================
gggg = [['1', '1', '1', '1', '0'],
['1', '1', '0', '1', '0'],
['1', '1', '0', '0', '0'],
['0', '0', '0', '0', '0']]
solution = Solution()
result = solution.numIslands(gggg)
print(result)
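# Expected output for the sample grid above: 1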
|
flexible
|
{
"blob_id": "b46f19708e9e2a1be2bbd001ca6341ee7468a60d",
"index": 7147,
"step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Solution(object):\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n <mask token>\n\n def dfs(self, matrix, i, j, m, n, visited):\n \"\"\"\n 深度优先遍历\n \"\"\"\n visited[i][j] = True\n for direction in self.directions:\n new_i = i + direction[0]\n new_j = j + direction[1]\n \"\"\"\n 对下一个格子,执行 DFS 的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and not visited[\n new_i][new_j] and matrix[new_i][new_j] == '1':\n self.dfs(matrix, new_i, new_j, m, n, visited)\n pass\n pass\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 DFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n self.dfs(grid, i, j, m, n, matrix_visited)\n island_count += 1\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n\n\nclass Solution(object):\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution(object):\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def dfs(self, matrix, i, j, m, n, visited):\n \"\"\"\n 深度优先遍历\n \"\"\"\n visited[i][j] = True\n for direction in self.directions:\n new_i = i + direction[0]\n new_j = j + direction[1]\n \"\"\"\n 对下一个格子,执行 DFS 的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and not visited[\n new_i][new_j] and matrix[new_i][new_j] == '1':\n self.dfs(matrix, new_i, new_j, m, n, visited)\n pass\n pass\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 DFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n self.dfs(grid, i, j, m, n, matrix_visited)\n island_count += 1\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\n\n\nclass Solution(object):\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n island_count = 0\n matrix_visited = [[(False) for _ in range(n)] for _ in range(m)]\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n island_count += 1\n matrix_visited[i][j] = True\n queue.append((i, j))\n while queue:\n x, y = queue.popleft()\n for direction in self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if (0 <= new_i <= m - 1 and 0 <= new_j <= n - 1 and\n not matrix_visited[new_i][new_j] and grid[\n new_i][new_j] == '1'):\n matrix_visited[new_i][new_j] = True\n queue.append((new_i, new_j))\n pass\n pass\n pass\n pass\n pass\n return island_count\n\n\n<mask token>\nprint(result)\n",
"step-5": "#!/usr/bin/env python\n# coding:utf-8\n\n\"\"\"\n200. 岛屿数量\n难度\n中等\n\n给定一个由 '1'(陆地)和 '0'(水)组成的的二维网格,计算岛屿的数量。一个岛被水包围,并且它是通过水平方向或垂直方向上相邻的陆地连接而成的。你可以假设网格的四个边均被水包围。\n\n示例 1:\n\n输入:\n11110\n11010\n11000\n00000\n\n输出: 1\n示例 2:\n\n输入:\n11000\n11000\n00100\n00011\n\n输出: 3\n\"\"\"\n# ================================================================================\n\"\"\"\n洪水填充算法(Flood Fill Algorithm)\n\"\"\"\n\"\"\"\nFlood Fill 算法\n\n是从一个区域中提取若干个连通的点与其他相邻区域区分开(或分别染成不同颜色)的经典算法。\n因为其思路类似洪水从一个区域扩散到所有能到达的区域而得名。\n\n从一个点扩散开,找到与其连通的点,这不是什么高深的算法,\n其实就是从一个点开始,进行一次“深度优先遍历”或者“广度优先遍历”,\n通过“深度优先遍历”或者“广度优先遍历”发现一片连着的区域,\n\n对于这道题来说,\n就是从一个是“陆地”的格子开始进行一次“深度优先遍历”或者“广度优先遍历”,\n把与之相连的所有的格子都标记上,视为发现了一个“岛屿”。\n\n说明:\n那么每一次进行“深度优先遍历”或者“广度优先遍历”的条件就是:\n1、这个格子是陆地(“1”),如果是水域(“0”)就无从谈论“岛屿”;\n2、这个格子不能是之前发现“岛屿”的过程中执行了“深度优先遍历”或者“广度优先遍历”操作,而被标记的格子。\n\"\"\"\n# ================================================================================\n\"\"\"\n思路:\n DFS(深度优先遍历)\n (回溯)\n时间复杂度:\n O()\n空间复杂度: \n O()\n\"\"\"\n\n\nclass Solution(object):\n # 方向数组,它表示了相对于当前位置的 4 个方向的横、纵坐标的偏移量\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n # 这4个方向的顺序无关紧要\n # 此处的方向顺序:上、右、下、左\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def dfs(self, matrix, i, j, m, n, visited):\n \"\"\"\n 深度优先遍历\n \"\"\"\n visited[i][j] = True\n # print '(%s,%s)' % (i, j)\n for direction in self.directions:\n new_i = i + direction[0]\n new_j = j + direction[1]\n \"\"\"\n 对下一个格子,执行 DFS 的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0<=new_i<=m-1 \\\n and 0<=new_j<=n-1 \\\n and not visited[new_i][new_j] \\\n and matrix[new_i][new_j] == '1':\n self.dfs(matrix, new_i, new_j, m, n, visited)\n pass\n pass\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n # 孤岛计数\n island_count = 0\n # 已访问过的记录矩阵\n matrix_visited = [[False for _ in range(n)] for _ in range(m)]\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 DFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n # 只要是陆地,且没有被访问过的,就可以使用 DFS 发现与之相连的陆地,并进行标记\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n self.dfs(grid, i, j, m, n, matrix_visited)\n # 岛屿计数 +1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n island_count += 1\n # print 'island_count:', island_count\n pass\n pass\n pass\n return island_count\n\n# ================================================================================\n\"\"\"\n思路:\n BFS(广度优先遍历)\n (需要一个辅助队列)\n时间复杂度:\n O()\n空间复杂度: \n O()\n\"\"\"\n\n\nclass Solution(object):\n # 方向数组,它表示了相对于当前位置的 4 个方向的横、纵坐标的偏移量\n \"\"\"\n x-1,y\n x,y-1 x,y x,y+1\n x+1,y\n \"\"\"\n # 这4个方向的顺序无关紧要\n # 此处的方向顺序:上、右、下、左\n directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n # 孤岛计数\n island_count = 0\n # 已访问过的记录矩阵\n matrix_visited = [[False for _ in range(n)] for _ in range(m)]\n # 辅助队列\n from collections import deque\n queue = deque()\n \"\"\"\n 从 (0,0) 开始,对每个格子尝试一次 BFS 操作\n \"\"\"\n for i in range(m):\n for j in range(n):\n # 只要是陆地,且没有被访问过的,就可以使用 BFS 发现与之相连的陆地,并进行标记\n if grid[i][j] == '1' and not matrix_visited[i][j]:\n # ------------------------------\n # 岛屿计数 +1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n island_count += 1\n # print 'island_count: ', island_count\n matrix_visited[i][j] = True\n # print '(%s,%s)' % (i, j)\n queue.append((i, j))\n # ------------------------------\n while queue:\n x, y = queue.popleft()\n # 依次检查 4 个方向的邻居\n for direction in 
self.directions:\n new_i = x + direction[0]\n new_j = y + direction[1]\n \"\"\"\n 标记该格子已被访问,并且入队列的条件:\n 1.横坐标在网格内\n 2.纵坐标在网格内\n 3.该格子没有被遍历过\n 4.该格子是陆地\n \"\"\"\n if 0 <= new_i <= m - 1 \\\n and 0 <= new_j <= n - 1 \\\n and not matrix_visited[new_i][new_j] \\\n and grid[new_i][new_j] == '1':\n # 标记已访问\n matrix_visited[new_i][new_j] = True\n # print '(%s,%s)' % (new_i, new_j)\n # 加入队列\n queue.append((new_i, new_j))\n pass\n pass\n # ------------------------------\n pass\n pass\n pass\n return island_count\n\n\n# ================================================================================\n# ================================================================================\n# ================================================================================\n# ================================================================================\n\n\ngggg = [['1', '1', '1', '1', '0'],\n ['1', '1', '0', '1', '0'],\n ['1', '1', '0', '0', '0'],\n ['0', '0', '0', '0', '0']]\nsolution = Solution()\nresult = solution.numIslands(gggg)\nprint(result)\n",
"step-ids": [
3,
5,
7,
10,
12
]
}
|
[
3,
5,
7,
10,
12
] |
<|reserved_special_token_0|>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
<|reserved_special_token_0|>
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
<|reserved_special_token_0|>
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
<|reserved_special_token_0|>
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
<|reserved_special_token_0|>
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
<|reserved_special_token_0|>
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()
enter_download_cas_1 = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'
).click()
return 0
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 = []
trlist_table_on = web.find_element_by_class_name('is-scrolling-none')
trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
ls.append(trlist_td[col])
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
if ls[0] == cause:
if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:
if j == 0:
ls_2.append(ls[2])
elif j == 1:
ls_2.append(ls[3])
return ls_2
def search_data_down(cause, clinique, path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique, path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique, path)
print(clinique + '--' + cause + '已下载完成!')
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
def search_data_on(cause, city, area, clinique, excel_time, path):
    """Core processing flow"""
    time.sleep(2)
    number = pagination()
    """Check whether the pending count is 0"""
    if number == 0:
        """Switch to the completed tab"""
        accomplish()
        time.sleep(2)
        number_accmplish_1 = pagination()
        """Check whether the completed count is 0"""
        if number_accmplish_1 == 0:
            """If it is 0, the download fails"""
            download_revers.append(clinique + '--' + cause + ' download failed!')
        else:
            """Otherwise check whether the current page holds at most 20 rows"""
            if 0 < number_accmplish_1 <= 20:
                """At most 20 rows: search this page"""
                accomplish_search_data = search_data(cause, city, area,
                    clinique, excel_time)
                if len(accomplish_search_data) == 0:
                    """Not found: give up"""
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                    reset()
                else:
                    """Found: click it"""
                    accomplish_search_data[0].click()
                    search_data_down(cause, clinique, path)
            elif 20 < number_accmplish_1 <= 40:
                """More than 20 rows"""
                accomplish_search_data = search_data(cause, city, area,
                    clinique, excel_time)
                """Check whether page 1 has the record"""
                if len(accomplish_search_data) == 0:
                    """Not found: turn the page"""
                    tourne_page()
                    accomplish_search_data = search_data(cause, city, area,
                        clinique, excel_time)
                    """Check whether it was found after paging"""
                    if len(accomplish_search_data) == 0:
                        """Still not found: record the failure"""
                        download_revers.append(clinique + '--' + cause +
                            ' download failed!')
                        reset()
                    else:
                        """Found: click it"""
                        accomplish_search_data[0].click()
                        search_data_down(cause, clinique, path)
                else:
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                    reset()
    else:
        """Check whether the pending tab holds at most 20 rows"""
        if 0 < number <= 20:
            """If so, search it"""
            pending__search_data = search_data(cause, city, area, clinique,
                excel_time)
            """Check whether the record was found"""
            if len(pending__search_data) == 0:
                """Not found"""
                """Switch to the completed tab"""
                accomplish()
                time.sleep(2)
                number_accmplish_1 = pagination()
                """Check whether the completed count is 0"""
                if number_accmplish_1 == 0:
                    """If it is 0, the download fails"""
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                else:
                    """Otherwise check whether the current page holds at most 20 rows"""
                    if 0 < number_accmplish_1 <= 20:
                        """At most 20 rows: search this page"""
                        accomplish_search_data = search_data(cause, city,
                            area, clinique, excel_time)
                        if len(accomplish_search_data) == 0:
                            """Not found: give up"""
                            download_revers.append(clinique + '--' + cause +
                                ' download failed!')
                            reset()
                        else:
                            """Found: click it"""
                            accomplish_search_data[0].click()
                            search_data_down(cause, clinique, path)
                    elif 20 < number_accmplish_1 <= 40:
                        """More than 20 rows"""
                        accomplish_search_data = search_data(cause, city,
                            area, clinique, excel_time)
                        """Check whether page 1 has the record"""
                        if len(accomplish_search_data) == 0:
                            """Not found: turn the page"""
                            tourne_page()
                            accomplish_search_data = search_data(cause,
                                city, area, clinique, excel_time)
                            """Check whether it was found after paging"""
                            if len(accomplish_search_data) == 0:
                                """Still not found: record the failure"""
                                download_revers.append(clinique + '--' +
                                    cause + ' download failed!')
                                reset()
                            else:
                                """Found: click it"""
                                accomplish_search_data[0].click()
                                search_data_down(cause, clinique, path)
                    else:
                        download_revers.append(clinique + '--' + cause +
                            ' download failed!')
                        reset()
            else:
                """Found it"""
                pending__search_data[0].click()
                search_data_down(cause, clinique, path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
def confirm_cause(cause):
"""选则症状"""
enter_symptom = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'
).click()
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')
enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')
for i in range(len(enter_on_symptom)):
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_symptom = enter_on.find_elements_by_tag_name('li')
if enter_on_symptom[i].text == cause:
enter_on_symptom[i].click()
break
return 0
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
def pending():
"""待处理"""
enter_pending = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'
).click()
return 0
<|reserved_special_token_0|>
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()
enter_download_cas_1 = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'
).click()
return 0
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 = []
trlist_table_on = web.find_element_by_class_name('is-scrolling-none')
trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
ls.append(trlist_td[col])
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
if ls[0] == cause:
if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:
if j == 0:
ls_2.append(ls[2])
elif j == 1:
ls_2.append(ls[3])
return ls_2
def search_data_down(cause, clinique, path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique, path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique, path)
print(clinique + '--' + cause + '已下载完成!')
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
def search_data_on(cause, city, area, clinique, excel_time, path):
    """Core processing flow"""
    time.sleep(2)
    number = pagination()
    """Check whether the pending count is 0"""
    if number == 0:
        """Switch to the completed tab"""
        accomplish()
        time.sleep(2)
        number_accmplish_1 = pagination()
        """Check whether the completed count is 0"""
        if number_accmplish_1 == 0:
            """If it is 0, the download fails"""
            download_revers.append(clinique + '--' + cause + ' download failed!')
        else:
            """Otherwise check whether the current page holds at most 20 rows"""
            if 0 < number_accmplish_1 <= 20:
                """At most 20 rows: search this page"""
                accomplish_search_data = search_data(cause, city, area,
                    clinique, excel_time)
                if len(accomplish_search_data) == 0:
                    """Not found: give up"""
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                    reset()
                else:
                    """Found: click it"""
                    accomplish_search_data[0].click()
                    search_data_down(cause, clinique, path)
            elif 20 < number_accmplish_1 <= 40:
                """More than 20 rows"""
                accomplish_search_data = search_data(cause, city, area,
                    clinique, excel_time)
                """Check whether page 1 has the record"""
                if len(accomplish_search_data) == 0:
                    """Not found: turn the page"""
                    tourne_page()
                    accomplish_search_data = search_data(cause, city, area,
                        clinique, excel_time)
                    """Check whether it was found after paging"""
                    if len(accomplish_search_data) == 0:
                        """Still not found: record the failure"""
                        download_revers.append(clinique + '--' + cause +
                            ' download failed!')
                        reset()
                    else:
                        """Found: click it"""
                        accomplish_search_data[0].click()
                        search_data_down(cause, clinique, path)
                else:
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                    reset()
    else:
        """Check whether the pending tab holds at most 20 rows"""
        if 0 < number <= 20:
            """If so, search it"""
            pending__search_data = search_data(cause, city, area, clinique,
                excel_time)
            """Check whether the record was found"""
            if len(pending__search_data) == 0:
                """Not found"""
                """Switch to the completed tab"""
                accomplish()
                time.sleep(2)
                number_accmplish_1 = pagination()
                """Check whether the completed count is 0"""
                if number_accmplish_1 == 0:
                    """If it is 0, the download fails"""
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                else:
                    """Otherwise check whether the current page holds at most 20 rows"""
                    if 0 < number_accmplish_1 <= 20:
                        """At most 20 rows: search this page"""
                        accomplish_search_data = search_data(cause, city,
                            area, clinique, excel_time)
                        if len(accomplish_search_data) == 0:
                            """Not found: give up"""
                            download_revers.append(clinique + '--' + cause +
                                ' download failed!')
                            reset()
                        else:
                            """Found: click it"""
                            accomplish_search_data[0].click()
                            search_data_down(cause, clinique, path)
                    elif 20 < number_accmplish_1 <= 40:
                        """More than 20 rows"""
                        accomplish_search_data = search_data(cause, city,
                            area, clinique, excel_time)
                        """Check whether page 1 has the record"""
                        if len(accomplish_search_data) == 0:
                            """Not found: turn the page"""
                            tourne_page()
                            accomplish_search_data = search_data(cause,
                                city, area, clinique, excel_time)
                            """Check whether it was found after paging"""
                            if len(accomplish_search_data) == 0:
                                """Still not found: record the failure"""
                                download_revers.append(clinique + '--' +
                                    cause + ' download failed!')
                                reset()
                            else:
                                """Found: click it"""
                                accomplish_search_data[0].click()
                                search_data_down(cause, clinique, path)
                    else:
                        download_revers.append(clinique + '--' + cause +
                            ' download failed!')
                        reset()
            else:
                """Found it"""
                pending__search_data[0].click()
                search_data_down(cause, clinique, path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
web = webdriver.Chrome(Chrome)
web.maximize_window()
web.implicitly_wait(10)
web.get(
'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'
)
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')
password = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys('cdc1234cdc')
enter = web.find_element_by_xpath(
'/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')
enter.click()
return 0
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
'/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'
)
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'
).click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'
)
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(
'el-scrollbar__view')
enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')
enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'
)
web.execute_script('arguments[0].click();', enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_city_on = enter_city_on_on[1].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_city = enter_city_on.find_elements_by_tag_name('li')
if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
try:
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
except:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')
enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')
enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')
enter_area_on = enter_area_on_on[2].find_element_by_class_name(
'el-cascader-menu__wrap')
enter_area = enter_area_on.find_elements_by_tag_name('li')
if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name(
'el-radio__input')
web.execute_script('arguments[0].click();', enter_area_down)
break
return 0
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_tiem(time):
"""确定时间"""
time = confirm_time_on(time)
enter_time = web.find_elements_by_class_name('el-range-input')
for i in enter_time:
i.send_keys(time)
return 0
def confirm_cause(cause):
"""选则症状"""
enter_symptom = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'
).click()
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')
enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')
for i in range(len(enter_on_symptom)):
enter_on = web.find_element_by_class_name('is-multiple')
enter_on_symptom = enter_on.find_elements_by_tag_name('li')
if enter_on_symptom[i].text == cause:
enter_on_symptom[i].click()
break
return 0
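# Added note: re-finding the <li> elements on every loop iteration guards
# against StaleElementReferenceException after the dropdown re-renders.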
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[3]/button[1]').click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath(
'/html/body/div/section/main/div/div[3]/button[2]').click()
return 0
def pending():
"""待处理"""
enter_pending = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'
).click()
return 0
def accomplish():
"""已完成"""
enter__accomplish = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'
).click()
return 0
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()
enter_download_cas_1 = web.find_element_by_xpath(
'/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'
).click()
return 0
def resetting_excel(cause, clinique, path='D:\\林钟\\下载'):
"""重命名病例"""
try:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.rename(src, dst)
except FileExistsError:
files = os.listdir(path)
src = path + '\\' + '外呼结果导出表格.xlsx'
if cause == '发热伴畏寒|寒战':
cause = "发热伴畏寒寒战'"
if cause == '畏寒|寒战':
cause = "畏寒寒战'"
dst = path + '\\' + clinique + '--' + cause + '.xlsx'
os.remove(dst)
os.rename(src, dst)
return 0
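# Added note (an alternative, not the author's code): on Python 3.3+,
# os.replace(src, dst) overwrites an existing destination in one call, which
# would collapse the try/except above:
# os.replace(src, dst)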
def pagination():
pagination__total = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/span[1]')
a = int(pagination__total.text[2:-2])
return a
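# Added note: the [2:-2] slice assumes the counter span reads like '共 N 条'
# (hypothetical example: '共 12 条' -> ' 12 '). A regex sketch would be more
# robust:
# import re
# a = int(re.search(r'\d+', pagination__total.text).group())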
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 = []
trlist_table_on = web.find_element_by_class_name('is-scrolling-none')
trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name('el-table__body')
trlist_tr = trlist_table.find_elements_by_tag_name('tr')
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
ls.append(trlist_td[col])
trlist_td = trlist_tr[row].find_elements_by_tag_name('td')
if ls[0] == cause:
if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:
if j == 0:
ls_2.append(ls[2])
elif j == 1:
ls_2.append(ls[3])
return ls_2
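# Added note: search_data matches rows by column position: column 2 holds the
# symptom, column 3 the region path ('安徽省/city/area/clinic'), and the
# clickable cell sits at column 7 or column 9 depending on the table layout;
# the j flag records which layout was seen.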
def search_data_down(cause, clinique, path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique, path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique, path)
print(clinique + '--' + cause + '已下载完成!')
def tourne_page():
enter_tourne_page = web.find_element_by_xpath(
'/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()
return ''
def search_data_on(cause, city, area, clinique, excel_time, path):
    """Core processing flow"""
    time.sleep(2)
    number = pagination()
    """Check whether the pending count is 0"""
    if number == 0:
        """Switch to the completed tab"""
        accomplish()
        time.sleep(2)
        number_accmplish_1 = pagination()
        """Check whether the completed count is 0"""
        if number_accmplish_1 == 0:
            """If it is 0, the download fails"""
            download_revers.append(clinique + '--' + cause + ' download failed!')
        else:
            """Otherwise check whether the current page holds at most 20 rows"""
            if 0 < number_accmplish_1 <= 20:
                """At most 20 rows: search this page"""
                accomplish_search_data = search_data(cause, city, area,
                    clinique, excel_time)
                if len(accomplish_search_data) == 0:
                    """Not found: give up"""
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                    reset()
                else:
                    """Found: click it"""
                    accomplish_search_data[0].click()
                    search_data_down(cause, clinique, path)
            elif 20 < number_accmplish_1 <= 40:
                """More than 20 rows"""
                accomplish_search_data = search_data(cause, city, area,
                    clinique, excel_time)
                """Check whether page 1 has the record"""
                if len(accomplish_search_data) == 0:
                    """Not found: turn the page"""
                    tourne_page()
                    accomplish_search_data = search_data(cause, city, area,
                        clinique, excel_time)
                    """Check whether it was found after paging"""
                    if len(accomplish_search_data) == 0:
                        """Still not found: record the failure"""
                        download_revers.append(clinique + '--' + cause +
                            ' download failed!')
                        reset()
                    else:
                        """Found: click it"""
                        accomplish_search_data[0].click()
                        search_data_down(cause, clinique, path)
                else:
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                    reset()
    else:
        """Check whether the pending tab holds at most 20 rows"""
        if 0 < number <= 20:
            """If so, search it"""
            pending__search_data = search_data(cause, city, area, clinique,
                excel_time)
            """Check whether the record was found"""
            if len(pending__search_data) == 0:
                """Not found"""
                """Switch to the completed tab"""
                accomplish()
                time.sleep(2)
                number_accmplish_1 = pagination()
                """Check whether the completed count is 0"""
                if number_accmplish_1 == 0:
                    """If it is 0, the download fails"""
                    download_revers.append(clinique + '--' + cause + ' download failed!')
                else:
                    """Otherwise check whether the current page holds at most 20 rows"""
                    if 0 < number_accmplish_1 <= 20:
                        """At most 20 rows: search this page"""
                        accomplish_search_data = search_data(cause, city,
                            area, clinique, excel_time)
                        if len(accomplish_search_data) == 0:
                            """Not found: give up"""
                            download_revers.append(clinique + '--' + cause +
                                ' download failed!')
                            reset()
                        else:
                            """Found: click it"""
                            accomplish_search_data[0].click()
                            search_data_down(cause, clinique, path)
                    elif 20 < number_accmplish_1 <= 40:
                        """More than 20 rows"""
                        accomplish_search_data = search_data(cause, city,
                            area, clinique, excel_time)
                        """Check whether page 1 has the record"""
                        if len(accomplish_search_data) == 0:
                            """Not found: turn the page"""
                            tourne_page()
                            accomplish_search_data = search_data(cause,
                                city, area, clinique, excel_time)
                            """Check whether it was found after paging"""
                            if len(accomplish_search_data) == 0:
                                """Still not found: record the failure"""
                                download_revers.append(clinique + '--' +
                                    cause + ' download failed!')
                                reset()
                            else:
                                """Found: click it"""
                                accomplish_search_data[0].click()
                                search_data_down(cause, clinique, path)
                    else:
                        download_revers.append(clinique + '--' + cause +
                            ' download failed!')
                        reset()
            else:
                """Found it"""
                pending__search_data[0].click()
                search_data_down(cause, clinique, path)
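# Added summary (not in the original): search_data_on checks the pending tab
# first; if its count is 0 or the record is missing there, it falls back to the
# completed tab, scanning at most two pages (<= 40 rows) before appending a
# failure entry to download_revers.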
if __name__ == '__main__':
    download_revers = []
    """Initialization"""
    url = input('Please enter the absolute path of the Excel file: ')
    path = 'D:\\林钟\\下载'
    Chrome = 'D:\\PYthon\\webdrivers\\chromedriver.exe'
    time1 = time.time()
    """Log in"""
    deconnexion(Chrome)
    print('Logged in')
    menu_lien()
    print('Navigated to the case-list page')
    """Read the spreadsheet"""
    excel = vb.load_workbook(url)
    sheet = excel['1-每日监控告警明细']
    subscript = 1
    for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):
        for cell in i:
            if cell.value in ['3', 3, '高']:
                """Initialize the search values"""
                cause = sheet['I' + str(cell.row)].value
                city = sheet['E' + str(cell.row)].value
                area = sheet['F' + str(cell.row)].value
                clinique = sheet['G' + str(cell.row)].value
                excel_time = sheet['D' + str(cell.row)].value
                """Search"""
                try:
                    confirm_area(city, area)
                    confirm_tiem(excel_time)
                    confirm_cause(cause)
                    search()
                except Exception:
                    try:
                        web.refresh()
                        print('Page refreshed')
                        confirm_area(city, area)
                        confirm_tiem(excel_time)
                        confirm_cause(cause)
                        search()
                    except Exception as e:
                        print('Refresh failed!', format(e))
                """Look up the record"""
                search_data_on(cause, city, area, clinique, excel_time, path)
    """Print the final results"""
    print('')
    print('<----------- Failed downloads ----------->')
    for i in download_revers:
        print(i)
    print('All downloads finished')
    time2 = time.time()
    print('Elapsed: {:.2f} s'.format(time2 - time1))
<|reserved_special_token_1|>
from selenium import webdriver
import time
import datetime
import os
import openpyxl as vb
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
def deconnexion(Chrome):
"""登陆"""
"""初始化"""
global web, actions
    web = webdriver.Chrome(Chrome)  # work computer
    # web = webdriver.Chrome(r'D:\python\webdrivers\chromedriver.exe')  # personal computer
    web.maximize_window()
    web.implicitly_wait(10)  # wait up to 10 seconds for each element lookup
web.get('http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain')
actions = ActionChains(web)
"""登录网页"""
username = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input') # 获得账号和密码
password = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')
username.send_keys('15375429564')
password.send_keys("cdc1234cdc")
enter = web.find_element_by_xpath("/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button")
enter.click()
return 0
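# Added note (assumption: this script targets the Selenium 3 API). On Selenium
# 4+, the driver path is passed through a Service object instead, e.g.:
# from selenium.webdriver.chrome.service import Service
# web = webdriver.Chrome(service=Service(Chrome))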
def menu_lien():
"""跳转页面"""
enter_into = web.find_element_by_xpath(
"/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article")
enter_into.click()
return 0
def confirm_area(city, area):
"""确定区域"""
"""点击区域"""
enter_area = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input").click()
"""点击安徽省"""
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
try:
enter_AnHui_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_AnHui_on =enter_AnHui_on_on[0].find_element_by_class_name("el-scrollbar__view")
except:
time.sleep(1)
enter_AnHui_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name("el-scrollbar__view")
    enter_AnHui = enter_AnHui_on.find_element_by_tag_name("li")
    enter_AnHui_down = enter_AnHui.find_element_by_class_name("el-radio__input")
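    # Click via JavaScript: the cascader radio <input> is visually covered, so
    # a plain .click() would likely be intercepted.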
web.execute_script("arguments[0].click();", enter_AnHui_down)
"""选择城市"""
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
try:
        enter_city_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
        enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap")
    except Exception:
time.sleep(1)
enter_city_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap")
enter_city = enter_city_on.find_elements_by_tag_name("li")
for i in range(len(enter_city)):
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
enter_city_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_city_on = enter_city_on_on[1].find_element_by_class_name("el-cascader-menu__wrap")
enter_city = enter_city_on.find_elements_by_tag_name("li")
        if enter_city[i].text == city:
enter_city_down = enter_city[i].find_element_by_class_name("el-radio__input")
web.execute_script("arguments[0].click();", enter_city_down)
break
"""选则区县"""
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
try:
        enter_area_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
        enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap")
    except Exception:
time.sleep(1)
enter_area_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap")
enter_area = enter_area_on.find_elements_by_tag_name("li")
for i in range(len(enter_area)):
enter_on_on = web.find_element_by_class_name("el-cascader__dropdown")
enter_on = enter_on_on.find_element_by_class_name("el-cascader-panel")
enter_area_on_on = enter_on.find_elements_by_class_name("el-scrollbar")
enter_area_on = enter_area_on_on[2].find_element_by_class_name("el-cascader-menu__wrap")
enter_area = enter_area_on.find_elements_by_tag_name("li")
        if enter_area[i].text == area:
enter_area_down = enter_area[i].find_element_by_class_name("el-radio__input")
web.execute_script("arguments[0].click();", enter_area_down)
break
return 0
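# Excel cells may hold either a preformatted string or a datetime object;
# normalise both to a 'YYYY-MM-DD' string before typing it into the date inputs.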
def confirm_time_on(excel_time):
if type(excel_time) == str:
return str(excel_time)
elif type(excel_time) == datetime.datetime:
excel_time_2 = excel_time.strftime('%Y-%m-%d')
return str(excel_time_2)
def confirm_time(excel_time):
    """Set the date range."""
    excel_time = confirm_time_on(excel_time)
    enter_time = web.find_elements_by_class_name("el-range-input")
    for i in enter_time:
        i.send_keys(excel_time)
    return 0
def confirm_cause(cause):
"""选则症状"""
enter_symptom = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input").click()
enter_on = web.find_element_by_class_name("is-multiple")
    enter_on_1 = enter_on.find_element_by_class_name("el-scrollbar")
enter_on_symptom = enter_on_1.find_elements_by_tag_name("li")
for i in range(len(enter_on_symptom)):
enter_on = web.find_element_by_class_name("is-multiple")
enter_on_symptom = enter_on.find_elements_by_tag_name("li")
if enter_on_symptom[i].text == cause:
enter_on_symptom[i].click()
break
return 0
def search():
"""点击搜索"""
enter_search = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[3]/button[1]").click()
return 0
def reset():
"""点击重置"""
enter_reset = web.find_element_by_xpath("/html/body/div/section/main/div/div[3]/button[2]").click()
return 0
def pending():
"""待处理"""
enter_pending = web.find_element_by_xpath(
"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]").click()
return 0
def accomplish():
"""已完成"""
enter__accomplish = web.find_element_by_xpath(
"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]").click()
return 0
def download_cas():
"""下载病例"""
enter_download_cas = web.find_element_by_xpath(
"/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]").click()
enter_download_cas_1 = web.find_element_by_xpath(
"/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]").click()
return 0
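# The portal always exports the same file name ('外呼结果导出表格.xlsx'); rename it
# per clinic and symptom. '|' is illegal in Windows file names, so the two
# symptom names containing it are rewritten first.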
def resetting_excel(cause, clinique, path=r"D:\林钟\下载"):
    """Rename the downloaded case sheet after the clinic and symptom."""
    src = path + "\\" + "外呼结果导出表格.xlsx"
    if cause == "发热伴畏寒|寒战":
        cause = "发热伴畏寒寒战"
    if cause == "畏寒|寒战":
        cause = "畏寒寒战"
    dst = path + "\\" + clinique + "--" + cause + ".xlsx"
    try:
        os.rename(src, dst)
    except FileExistsError:
        os.remove(dst)
        os.rename(src, dst)
return 0
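# The paginator label appears to read '共 N 条'; pagination() slices off the
# two leading and two trailing characters to isolate the count N.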
def pagination():  # total number of records in the current result set
pagination__total = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[5]/span[1]")
a = int(pagination__total.text[2:-2])
return a
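# Inside search_data, the 2nd cell of a row is the symptom, the 3rd the region
# path, and the 7th or 9th (depending on the tab layout) the clickable action
# cell; j flags which of the two was present.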
def search_data(cause, city, area, clinique, excel_time):
"""查找数据"""
ls_2 =[] #存储最终点击的元素,如果为空则说明没找到。
trlist_table_on = web.find_element_by_class_name("is-scrolling-none")
trlist_table = trlist_table_on.find_element_by_class_name("el-table__body")
trlist_tr = trlist_table.find_elements_by_tag_name("tr")
for row in range(len(trlist_tr)):
trlist_table = web.find_element_by_class_name("el-table__body")
trlist_tr = trlist_table.find_elements_by_tag_name("tr")
trlist_td = trlist_tr[row].find_elements_by_tag_name("td")
i = 0
j = 0
ls = []
for col in range(len(trlist_td)):
i += 1
if i == 2:
ls.append(trlist_td[col].text)
elif i == 3:
ls.append(trlist_td[col].text)
elif i == 7:
ls.append(trlist_td[col])
elif i == 9:
j = 1
                ls.append(trlist_td[col])
trlist_td = trlist_tr[row].find_elements_by_tag_name("td")
        if ls[0] == cause:
            if ls[1] == ("安徽省/" + city + "/" + area + "/" + clinique):
                if j == 0:
                    ls_2.append(ls[2])
                elif j == 1:
                    ls_2.append(ls[3])
return ls_2
def search_data_down(cause,clinique,path):
"""找到病例后的对病例进行一系列的处理"""
"""下载病例"""
download_cas()
"""返回上一界面"""
web.back()
"""点击重置"""
reset()
"""点击待完成"""
pending()
"""给病例重命名"""
time.sleep(2)
try:
resetting_excel(cause, clinique,path)
except FileNotFoundError:
time.sleep(2)
resetting_excel(cause, clinique,path)
print(clinique + "--" + cause + "已下载完成!")
def tourne_page():
    enter_tourne_page = web.find_element_by_xpath("/html/body/div[1]/section/main/div/div[5]/button[2]/i").click()
return ""
def search_data_on(cause, city, area, clinique, excel_time, path):
"""核心处理流程"""
time.sleep(2)
number = pagination()
"""判断待处理下标是否为0"""
if number == 0 :
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause,clinique,path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause,clinique,path)
else:
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""判断待处理里是否小于20条数据"""
if 0 < number <= 20:
"""如果小于进行查找"""
pending__search_data = search_data(cause, city, area, clinique, excel_time)
"""判断有没有找到"""
if len(pending__search_data) == 0:
"""没找到"""
"""点击已完成"""
accomplish()
time.sleep(2)
number_accmplish_1 = pagination()
"""判断已完成的下标是否为0"""
if number_accmplish_1 == 0:
"""如果为0下载失败"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
else:
"""不为0判断当前界面是否只有20条数据"""
if 0 < number_accmplish_1 <= 20:
"""只有20条数据查找数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
if len(accomplish_search_data) == 0:
"""如果没找到结束"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""如果找到则点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
elif 20 < number_accmplish_1 <= 40:
"""多于20条数据"""
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断第一页有没有查到"""
if len(accomplish_search_data) == 0:
"""如果没找到翻页"""
tourne_page()
accomplish_search_data = search_data(cause, city, area, clinique, excel_time)
"""判断翻页后有没有找到"""
if len(accomplish_search_data) == 0:
"""如果没找到存入列表"""
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""找到后点击"""
accomplish_search_data[0].click()
search_data_down(cause, clinique, path)
else:
download_revers.append(clinique + "--" + cause + " 下载失败!")
reset()
else:
"""找到了"""
pending__search_data[0].click()
search_data_down(cause,clinique,path)
# elif 20< number <= 40:
# pending__search_data = search_data(cause, city, area, clinique, excel_time)
# """判断有没有找到"""
# if len(pending__search_data) == 0:
if __name__ == "__main__":
download_revers = []
"""初始化"""
url = input("请输入文件的绝对路径:") #文件路径
path = "D:\林钟\下载" # 下载路径
Chrome = r'D:\PYthon\webdrivers\chromedriver.exe' #驱动路径
time1 = time.time()
"""登录页面"""
deconnexion(Chrome)
print("已登陆")
menu_lien()
print("已跳转")
"""读取表格"""
excel = vb.load_workbook(url)
sheet = excel["1-每日监控告警明细"]
subscript = 1
for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):
for cell in i:
if cell.value in ["3", 3, "高"]:
"""初始化数值"""
cause = sheet["I" + str(cell.row)].value
city = sheet["E" + str(cell.row)].value
area = sheet["F" + str(cell.row)].value
clinique = sheet["G" + str(cell.row)].value
excel_time = sheet["D" + str(cell.row)].value
"""搜索"""
try:
confirm_area(city, area)
confirm_tiem(excel_time)
confirm_cause(cause)
search()
except:
try:
web.refresh() # 刷新方法 refresh
print('刷新成功')
confirm_area(city, area)
confirm_tiem(excel_time)
confirm_cause(cause)
search()
except Exception as e:
print("刷新失败!", format(e))
"""查找数据"""
search_data_on(cause, city, area, clinique, excel_time, path)
"""打印最终结果"""
print("")
print("<-----------下面是下载失败的----------->")
for i in download_revers:
print(i)
print("已全部下载完毕")
time2 = time.time()
print("用时:{:.2f} 秒".format(time2-time1))
|
flexible
|
{
"blob_id": "d2c31d9c3cc66b43966cfd852582539d4e4bea17",
"index": 321,
"step-1": "<mask token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n 
enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\n<mask token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\n<mask token>\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\ndef pagination():\n pagination__total = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/span[1]')\n a = int(pagination__total.text[2:-2])\n return a\n\n\n<mask token>\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n 
enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\n<mask token>\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\n<mask token>\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\ndef pagination():\n pagination__total = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/span[1]')\n a = int(pagination__total.text[2:-2])\n return a\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + 
'/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + 
' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n 
enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\n<mask token>\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\ndef pagination():\n pagination__total = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/span[1]')\n a = int(pagination__total.text[2:-2])\n return a\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on 
= web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n 
accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome)\n web.maximize_window()\n web.implicitly_wait(10)\n web.get(\n 'http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain'\n )\n actions = ActionChains(web)\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input')\n password = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys('cdc1234cdc')\n enter = web.find_element_by_xpath(\n '/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button')\n enter.click()\n return 0\n\n\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n '/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article'\n )\n enter_into.click()\n return 0\n\n\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input'\n ).click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name('el-scrollbar'\n )\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\n 'el-scrollbar__view')\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name('li')\n enter_AnHui_down = enter_AnHui.find_element_by_class_name('el-radio__input'\n )\n web.execute_script('arguments[0].click();', enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_city_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_city = enter_city_on.find_elements_by_tag_name('li')\n if enter_city[i].text == city:\n enter_city_down = enter_city[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n try:\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n 
enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name('el-cascader__dropdown')\n enter_on = enter_on_on.find_element_by_class_name('el-cascader-panel')\n enter_area_on_on = enter_on.find_elements_by_class_name('el-scrollbar')\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\n 'el-cascader-menu__wrap')\n enter_area = enter_area_on.find_elements_by_tag_name('li')\n if enter_area[i].text == area:\n enter_area_down = enter_area[i].find_element_by_class_name(\n 'el-radio__input')\n web.execute_script('arguments[0].click();', enter_area_down)\n break\n return 0\n\n\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\n\n\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time = confirm_time_on(time)\n enter_time = web.find_elements_by_class_name('el-range-input')\n for i in enter_time:\n i.send_keys(time)\n return 0\n\n\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input'\n ).click()\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_1 = enter_on.find_element_by_class_name('el-scrollbar')\n enter_on_symptom = enter_on_1.find_elements_by_tag_name('li')\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name('is-multiple')\n enter_on_symptom = enter_on.find_elements_by_tag_name('li')\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\n\n\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[3]/button[1]').click()\n return 0\n\n\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\n '/html/body/div/section/main/div/div[3]/button[2]').click()\n return 0\n\n\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]'\n ).click()\n return 0\n\n\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]'\n ).click()\n return 0\n\n\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]').click()\n enter_download_cas_1 = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]'\n ).click()\n return 0\n\n\ndef resetting_excel(cause, clinique, path='D:\\\\林钟\\\\下载'):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.rename(src, dst)\n except FileExistsError:\n files = os.listdir(path)\n src = path + '\\\\' + '外呼结果导出表格.xlsx'\n if cause == '发热伴畏寒|寒战':\n cause = \"发热伴畏寒寒战'\"\n if cause == '畏寒|寒战':\n cause = \"畏寒寒战'\"\n dst = path + '\\\\' + clinique + '--' + cause + '.xlsx'\n os.remove(dst)\n os.rename(src, dst)\n return 0\n\n\ndef pagination():\n pagination__total = web.find_element_by_xpath(\n 
'/html/body/div[1]/section/main/div/div[5]/span[1]')\n a = int(pagination__total.text[2:-2])\n return a\n\n\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 = []\n trlist_table_on = web.find_element_by_class_name('is-scrolling-none')\n trlist_table = trlist_table_on.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name('el-table__body')\n trlist_tr = trlist_table.find_elements_by_tag_name('tr')\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append(trlist_td[col])\n trlist_td = trlist_tr[row].find_elements_by_tag_name('td')\n if ls[0] == cause:\n if ls[1] == '安徽省/' + city + '/' + area + '/' + clinique:\n if j == 0:\n ls_2.append(ls[2])\n elif j == 1:\n ls_2.append(ls[3])\n return ls_2\n\n\ndef search_data_down(cause, clinique, path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique, path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique, path)\n print(clinique + '--' + cause + '已下载完成!')\n\n\ndef tourne_page():\n enter_tourne_page = web.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[5]/button[2]/i').click()\n return ''\n\n\ndef search_data_on(cause, city, area, clinique, excel_time, path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0:\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area,\n clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique,\n excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if 
number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + '--' + cause + ' 下载失败!')\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city,\n area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause,\n city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + '--' +\n cause + ' 下载失败!')\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + '--' + cause +\n ' 下载失败!')\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause, clinique, path)\n\n\nif __name__ == '__main__':\n download_revers = []\n \"\"\"初始化\"\"\"\n url = input('请输入文件的绝对路径:')\n path = 'D:\\\\林钟\\\\下载'\n Chrome = 'D:\\\\PYthon\\\\webdrivers\\\\chromedriver.exe'\n time1 = time.time()\n \"\"\"登录页面\"\"\"\n deconnexion(Chrome)\n print('已登陆')\n menu_lien()\n print('已跳转')\n \"\"\"读取表格\"\"\"\n excel = vb.load_workbook(url)\n sheet = excel['1-每日监控告警明细']\n subscript = 1\n for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):\n for cell in i:\n if cell.value in ['3', 3, '高']:\n \"\"\"初始化数值\"\"\"\n cause = sheet['I' + str(cell.row)].value\n city = sheet['E' + str(cell.row)].value\n area = sheet['F' + str(cell.row)].value\n clinique = sheet['G' + str(cell.row)].value\n excel_time = sheet['D' + str(cell.row)].value\n \"\"\"搜索\"\"\"\n try:\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except:\n try:\n web.refresh()\n print('刷新成功')\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except Exception as e:\n print('刷新失败!', format(e))\n \"\"\"查找数据\"\"\"\n search_data_on(cause, city, area, clinique, excel_time, path)\n \"\"\"打印最终结果\"\"\"\n print('')\n print('<-----------下面是下载失败的----------->')\n for i in download_revers:\n print(i)\n print('已全部下载完毕')\n time2 = time.time()\n print('用时:{:.2f} 秒'.format(time2 - time1))\n",
"step-5": "from selenium import webdriver\nimport time\nimport datetime\nimport os\nimport openpyxl as vb\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.action_chains import ActionChains\n\ndef deconnexion(Chrome):\n \"\"\"登陆\"\"\"\n \"\"\"初始化\"\"\"\n global web, actions\n web = webdriver.Chrome(Chrome) #公司电脑\n # web = webdriver.Chrome(r'D:\\python\\webdrivers\\chromedriver.exe') #自己的电脑\n web.maximize_window()\n web.implicitly_wait(10) # 最大运行时间不超过10秒\n web.get('http://www.wjw-cdc.com:8003/user-center-portal/login?redirect=%2Fmain')\n actions = ActionChains(web)\n\n \"\"\"登录网页\"\"\"\n username = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[1]/div/div[1]/input') # 获得账号和密码\n password = web.find_element_by_xpath('/html/body/div/div/div[1]/div/div[2]/form/div[2]/div/div[2]/input')\n username.send_keys('15375429564')\n password.send_keys(\"cdc1234cdc\")\n enter = web.find_element_by_xpath(\"/html/body/div/div/div[1]/div/div[2]/form/div[3]/div/button\")\n enter.click()\n return 0\ndef menu_lien():\n \"\"\"跳转页面\"\"\"\n enter_into = web.find_element_by_xpath(\n \"/html/body/div[1]/div/div[2]/section/div/div[1]/ul/li[1]/ul/li[2]/span/div[2]/section/article\")\n enter_into.click()\n return 0\ndef confirm_area(city, area):\n \"\"\"确定区域\"\"\"\n \"\"\"点击区域\"\"\"\n enter_area = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[3]/div[1]/span/div/div[1]/input\").click()\n \"\"\"点击安徽省\"\"\"\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n try:\n enter_AnHui_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_AnHui_on =enter_AnHui_on_on[0].find_element_by_class_name(\"el-scrollbar__view\")\n except:\n time.sleep(1)\n enter_AnHui_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_AnHui_on = enter_AnHui_on_on[0].find_element_by_class_name(\"el-scrollbar__view\")\n enter_AnHui = enter_AnHui_on.find_element_by_tag_name(\"li\")\n enter_AnHui_down =enter_AnHui.find_element_by_class_name(\"el-radio__input\")\n web.execute_script(\"arguments[0].click();\", enter_AnHui_down)\n \"\"\"选择城市\"\"\"\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n try:\n enter_city_on_on =enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\"el-cascader-menu__wrap\")\n except:\n time.sleep(1)\n enter_city_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_city = enter_city_on.find_elements_by_tag_name(\"li\")\n for i in range(len(enter_city)):\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n enter_city_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_city_on = enter_city_on_on[1].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_city = enter_city_on.find_elements_by_tag_name(\"li\")\n if enter_city[i].text ==city:\n enter_city_down = enter_city[i].find_element_by_class_name(\"el-radio__input\")\n web.execute_script(\"arguments[0].click();\", enter_city_down)\n break\n \"\"\"选则区县\"\"\"\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = 
enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n try:\n enter_area_on_on =enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\"el-cascader-menu__wrap\")\n except:\n time.sleep(1)\n enter_area_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_area = enter_area_on.find_elements_by_tag_name(\"li\")\n for i in range(len(enter_area)):\n enter_on_on = web.find_element_by_class_name(\"el-cascader__dropdown\")\n enter_on = enter_on_on.find_element_by_class_name(\"el-cascader-panel\")\n enter_area_on_on = enter_on.find_elements_by_class_name(\"el-scrollbar\")\n enter_area_on = enter_area_on_on[2].find_element_by_class_name(\"el-cascader-menu__wrap\")\n enter_area = enter_area_on.find_elements_by_tag_name(\"li\")\n if enter_area[i].text ==area:\n enter_area_down = enter_area[i].find_element_by_class_name(\"el-radio__input\")\n web.execute_script(\"arguments[0].click();\", enter_area_down)\n break\n\n return 0\ndef confirm_time_on(excel_time):\n if type(excel_time) == str:\n return str(excel_time)\n elif type(excel_time) == datetime.datetime:\n excel_time_2 = excel_time.strftime('%Y-%m-%d')\n return str(excel_time_2)\ndef confirm_tiem(time):\n \"\"\"确定时间\"\"\"\n time =confirm_time_on(time)\n enter_time = web.find_elements_by_class_name(\"el-range-input\")\n for i in enter_time:\n i.send_keys(time)\n return 0\ndef confirm_cause(cause):\n \"\"\"选则症状\"\"\"\n enter_symptom = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[3]/div[3]/span/div/div[2]/input\").click()\n enter_on = web.find_element_by_class_name(\"is-multiple\")\n enter_on_1 =enter_on.find_element_by_class_name(\"el-scrollbar\")\n enter_on_symptom = enter_on_1.find_elements_by_tag_name(\"li\")\n for i in range(len(enter_on_symptom)):\n enter_on = web.find_element_by_class_name(\"is-multiple\")\n enter_on_symptom = enter_on.find_elements_by_tag_name(\"li\")\n if enter_on_symptom[i].text == cause:\n enter_on_symptom[i].click()\n break\n return 0\ndef search():\n \"\"\"点击搜索\"\"\"\n enter_search = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[3]/button[1]\").click()\n return 0\ndef reset():\n \"\"\"点击重置\"\"\"\n enter_reset = web.find_element_by_xpath(\"/html/body/div/section/main/div/div[3]/button[2]\").click()\n return 0\ndef pending():\n \"\"\"待处理\"\"\"\n enter_pending = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[1]\").click()\n return 0\ndef accomplish():\n \"\"\"已完成\"\"\"\n enter__accomplish = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/div/div[4]/div/div[1]/div/div/div/div[3]\").click()\n return 0\ndef download_cas():\n \"\"\"下载病例\"\"\"\n enter_download_cas = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/section/main/div[2]/ul/li[2]\").click()\n enter_download_cas_1 = web.find_element_by_xpath(\n \"/html/body/div[1]/section/main/section/main/div[2]/div/div[2]/div/div[1]/div/button[3]\").click()\n return 0\ndef resetting_excel(cause, clinique, path=\"D:\\林钟\\下载\"):\n \"\"\"重命名病例\"\"\"\n try:\n files = os.listdir(path)\n src = path + \"\\\\\" + \"外呼结果导出表格.xlsx\"\n if cause ==\"发热伴畏寒|寒战\":\n cause =\"发热伴畏寒寒战'\"\n if cause == \"畏寒|寒战\":\n cause = \"畏寒寒战'\"\n dst = path + \"\\\\\" + clinique + \"--\" + cause + \".xlsx\"\n os.rename(src, dst)\n except (FileExistsError):\n files = os.listdir(path)\n src = path + \"\\\\\" + 
\"外呼结果导出表格.xlsx\"\n if cause ==\"发热伴畏寒|寒战\":\n cause =\"发热伴畏寒寒战'\"\n if cause == \"畏寒|寒战\":\n cause = \"畏寒寒战'\"\n dst = path + \"\\\\\" + clinique + \"--\" + cause + \".xlsx\"\n os.remove(dst)\n os.rename(src, dst)\n\n return 0\ndef pagination(): #获取当前界面一共有多少条数据\n pagination__total = web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[5]/span[1]\")\n a = int(pagination__total.text[2:-2])\n return a\ndef search_data(cause, city, area, clinique, excel_time):\n \"\"\"查找数据\"\"\"\n ls_2 =[] #存储最终点击的元素,如果为空则说明没找到。\n trlist_table_on = web.find_element_by_class_name(\"is-scrolling-none\")\n trlist_table = trlist_table_on.find_element_by_class_name(\"el-table__body\")\n trlist_tr = trlist_table.find_elements_by_tag_name(\"tr\")\n for row in range(len(trlist_tr)):\n trlist_table = web.find_element_by_class_name(\"el-table__body\")\n trlist_tr = trlist_table.find_elements_by_tag_name(\"tr\")\n trlist_td = trlist_tr[row].find_elements_by_tag_name(\"td\")\n i = 0\n j = 0\n ls = []\n for col in range(len(trlist_td)):\n i += 1\n if i == 2:\n ls.append(trlist_td[col].text)\n elif i == 3:\n ls.append(trlist_td[col].text)\n elif i == 7:\n ls.append(trlist_td[col])\n elif i == 9:\n j = 1\n ls.append((trlist_td[col]))\n trlist_td = trlist_tr[row].find_elements_by_tag_name(\"td\")\n if ls[0] == cause:\n if ls[1] == (\"安徽省/\" + city + \"/\" + area + \"/\" + clinique):\n if j == 0:\n # ls[2].click()\n ls_2.append(ls[2])\n elif j == 1:\n # ls[3].click()\n ls_2.append(ls[3])\n return ls_2\ndef search_data_down(cause,clinique,path):\n \"\"\"找到病例后的对病例进行一系列的处理\"\"\"\n \"\"\"下载病例\"\"\"\n download_cas()\n \"\"\"返回上一界面\"\"\"\n web.back()\n \"\"\"点击重置\"\"\"\n reset()\n \"\"\"点击待完成\"\"\"\n pending()\n \"\"\"给病例重命名\"\"\"\n time.sleep(2)\n try:\n resetting_excel(cause, clinique,path)\n except FileNotFoundError:\n time.sleep(2)\n resetting_excel(cause, clinique,path)\n print(clinique + \"--\" + cause + \"已下载完成!\")\ndef tourne_page():\n enter_tourne_page =web.find_element_by_xpath(\"/html/body/div[1]/section/main/div/div[5]/button[2]/i\").click()\n return \"\"\ndef search_data_on(cause, city, area, clinique, excel_time,path):\n \"\"\"核心处理流程\"\"\"\n time.sleep(2)\n number = pagination()\n \"\"\"判断待处理下标是否为0\"\"\"\n if number == 0 :\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause,clinique,path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause,clinique,path)\n else:\n download_revers.append(clinique + \"--\" + cause + 
\" 下载失败!\")\n reset()\n else:\n \"\"\"判断待处理里是否小于20条数据\"\"\"\n if 0 < number <= 20:\n \"\"\"如果小于进行查找\"\"\"\n pending__search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断有没有找到\"\"\"\n if len(pending__search_data) == 0:\n \"\"\"没找到\"\"\"\n \"\"\"点击已完成\"\"\"\n accomplish()\n time.sleep(2)\n number_accmplish_1 = pagination()\n \"\"\"判断已完成的下标是否为0\"\"\"\n if number_accmplish_1 == 0:\n \"\"\"如果为0下载失败\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n else:\n \"\"\"不为0判断当前界面是否只有20条数据\"\"\"\n if 0 < number_accmplish_1 <= 20:\n \"\"\"只有20条数据查找数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到结束\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"如果找到则点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n elif 20 < number_accmplish_1 <= 40:\n \"\"\"多于20条数据\"\"\"\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断第一页有没有查到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到翻页\"\"\"\n tourne_page()\n accomplish_search_data = search_data(cause, city, area, clinique, excel_time)\n \"\"\"判断翻页后有没有找到\"\"\"\n if len(accomplish_search_data) == 0:\n \"\"\"如果没找到存入列表\"\"\"\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"找到后点击\"\"\"\n accomplish_search_data[0].click()\n search_data_down(cause, clinique, path)\n else:\n download_revers.append(clinique + \"--\" + cause + \" 下载失败!\")\n reset()\n else:\n \"\"\"找到了\"\"\"\n pending__search_data[0].click()\n search_data_down(cause,clinique,path)\n\n # elif 20< number <= 40:\n # pending__search_data = search_data(cause, city, area, clinique, excel_time)\n # \"\"\"判断有没有找到\"\"\"\n # if len(pending__search_data) == 0:\n\n\nif __name__ == \"__main__\":\n\n download_revers = []\n \"\"\"初始化\"\"\"\n url = input(\"请输入文件的绝对路径:\") #文件路径\n path = \"D:\\林钟\\下载\" # 下载路径\n Chrome = r'D:\\PYthon\\webdrivers\\chromedriver.exe' #驱动路径\n time1 = time.time()\n \"\"\"登录页面\"\"\"\n deconnexion(Chrome)\n print(\"已登陆\")\n menu_lien()\n print(\"已跳转\")\n\n \"\"\"读取表格\"\"\"\n excel = vb.load_workbook(url)\n sheet = excel[\"1-每日监控告警明细\"]\n subscript = 1\n for i in sheet.iter_rows(min_row=2, max_row=101, max_col=1):\n for cell in i:\n if cell.value in [\"3\", 3, \"高\"]:\n\n \"\"\"初始化数值\"\"\"\n cause = sheet[\"I\" + str(cell.row)].value\n city = sheet[\"E\" + str(cell.row)].value\n area = sheet[\"F\" + str(cell.row)].value\n clinique = sheet[\"G\" + str(cell.row)].value\n excel_time = sheet[\"D\" + str(cell.row)].value\n\n \"\"\"搜索\"\"\"\n try:\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except:\n try:\n web.refresh() # 刷新方法 refresh\n print('刷新成功')\n confirm_area(city, area)\n confirm_tiem(excel_time)\n confirm_cause(cause)\n search()\n except Exception as e:\n print(\"刷新失败!\", format(e))\n\n\n \"\"\"查找数据\"\"\"\n search_data_on(cause, city, area, clinique, excel_time, path)\n\n\n \"\"\"打印最终结果\"\"\"\n print(\"\")\n print(\"<-----------下面是下载失败的----------->\")\n for i in download_revers:\n print(i)\n print(\"已全部下载完毕\")\n time2 = time.time()\n print(\"用时:{:.2f} 秒\".format(time2-time1))",
"step-ids": [
10,
14,
16,
18,
20
]
}
|
[
10,
14,
16,
18,
20
] |
#! /usr/bin/python
import Nodo,CLS,copy,sys
from excepcion import *
sys.setrecursionlimit(100000)
# Match
def match(nodo1,nodo2):
#print 'MATCH\n -',nodo1,'\n -',nodo2
# if isinstance(nodo1,Nodo.Nodo) and isinstance(nodo2,Nodo.Nodo):
# print ' -',nodo1.type,'\n -',nodo2.type
	# Base case of the recursion
if (not isinstance(nodo1,Nodo.Nodo)) and (not isinstance(nodo2,Nodo.Nodo)):
#print '- ',nodo1,'\n- ',nodo2
return nodo1 == nodo2
	# Descend when the node is a non-terminal wrapper
if nodo1.type == 'no_terminal' or nodo1.type == '' or nodo1.type == 'PATRON' or nodo1.type == 'sub' or nodo1.type == 'LISTAPATRON' or nodo1.type == 'lp': return match(nodo1.izquierdo,nodo2)
if nodo2.type == 'no_terminal' or nodo2.type == '' or nodo2.type == 'PATRON' or nodo2.type == 'sub' or nodo2.type == 'LISTAPATRON' or nodo2.type == 'lp': return match(nodo2.izquierdo,nodo1)
	# Variables match anything
if nodo1.type == 'VARIABLE' or nodo2.type == 'VARIABLE':
#print '- Variable\n -'
return True
	# Constants
if nodo1.type == 'CONSTANTE' and nodo2.type == 'CONSTANTE':
#print '- Constante\n -'
#print '(',nodo1.type,' ',nodo2.type,')\n'
return match(nodo1.izquierdo.izquierdo,nodo2.izquierdo.izquierdo)
	# Integer
if nodo1.type == 'ENTERO' and nodo2.type == 'ENTERO':
#print '- Entero\n -'
return match(nodo1.izquierdo,nodo2.izquierdo)
	# Boolean
if nodo1.type == 'BOOLEANO' and nodo2.type == 'BOOLEANO':
# print '- Booleano\n -'
return match(nodo1.izquierdo,nodo2.izquierdo)
	# Empty list
if nodo1.type == 'CONSTLV' and nodo2.type == 'CONSTLV':
#return match(nodo1.izquierdo,nodo2.izquierdo)
return True
	# Lists
if nodo1.type == 'LISTA' and nodo2.type == 'LISTA':
#print 'BLAH',nodo1.izquierdo,nodo2.izquierdo,nodo1.derecho,nodo2.derecho
#print match(nodo1.izquierdo,nodo2.izquierdo) and match(nodo1.derecho,nodo2.derecho)
#return match(nodo1.izquierdo,nodo2.izquierdo) and match(nodo1.derecho,nodo2.derecho)
return comparar_listas(nodo1,nodo2,[])
# print 'falso'
return False
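# Example (using the Nodo constructions that appear later in this file):
#   match(Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',3)),
#         Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',3)))   # -> True
# while a VARIABLE node matches any node at all.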
# Compare lists element by element, accumulating the matched pairs
def comparar_listas(lista1,lista2,tuplas):
print 'incomp-tuplas: ',tuplas
if match(lista1.izquierdo,lista2.izquierdo):
tuplas.append((lista1.izquierdo,lista2.izquierdo))
d1 = lista1.derecho
d2 = lista2.derecho
if d1.type == 'LISTA':
if d2.type == 'LISTA':
comparar_listas(lista1.derecho,lista2.derecho,tuplas)
else:
if match(d1,d2): tuplas.append((d1,d2))
elif d2.type == 'LISTA':
if match(d1,d2): tuplas.append((d1,d2))
else:
if match(d1,d2): tuplas.append((d1,d2))
return tuplas
else: return False
# Replace
def replace(diccionario,clave,valor):
diccionario[clave] = valor
return diccionario
# Extend
def extend(diccionario,clave,valor):
diccionario[clave] = valor
return diccionario
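# Note: replace and extend are the same operation; both mutate the
# environment dict in place and return it.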
# Lookup
def lookup(clave,diccionario):
try:
if clave in diccionario:
return diccionario[clave]
else: raise ParametrosError('Variable '+str(clave)+' no declarada')
except ParametrosError, messag:
messag = messag.messg
print 'Error : ' + messag
# Directional eval helper; note this definition is shadowed by the full
# eval(nodo,env) defined further below, so the later one is what runs.
def eval(nodo,env,orientacion):
if orientacion == 'izquierda': return eval(nodo.izquierdo,env)
return eval(nodo.derecho,env)
# valor: unwrap a node down to its primitive Python value
def valor(nodo):
while isinstance(nodo,Nodo.Nodo):
		if nodo.type == 'BOOLEANO':
			# boolean payloads appear both as the string 'TRUE' and as a Python bool
			if nodo.izquierdo == 'TRUE' or nodo.izquierdo is True: return True
			else: return False
elif nodo.type != 'LISTA':
nodo = nodo.izquierdo
else:
return str(valor(nodo.izquierdo))+'::'+str(valor(nodo.derecho))
return nodo
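# e.g. valor(Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',3))) unwraps to 3,
# and a LISTA node is rendered as a 'head::tail' string.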
# Number of patterns of a function
def cantidad_patrones(nodo):
while (nodo.type != 'lp'):
nodo = nodo.izquierdo
global suma
suma = 0
tam_listapatron(nodo)
return suma
# Size of a pattern list; cantidad_patrones above relies on it
def tam_listapatron(nodo):
	global suma
	i = nodo.izquierdo
	d = nodo.derecho
	if nodo.type == 'PATRON':
		suma += 1
		return
	else:
		if isinstance(i,Nodo.Nodo):
			tam_listapatron(i)
		if isinstance(d,Nodo.Nodo):
			tam_listapatron(d)
	return suma
# Apply
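# Given a CLS closure, each (pattern, expression) clause is tried in order;
# the first pattern that matches `nodo` selects its expression, which is
# evaluated in the closure environment extended with the matched bindings.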
def apply(cls,nodo):
for c in cls.clausura:
#print 'C[0]\n =',valor(nodo)
comparar = match(c[0],nodo)
print 'comp', comparar
if comparar:
if isinstance(comparar,list):
#print 'Matcheo de listas', comparar[0],comparar[1]
#print 'APPLY\n @',cls,'\n @',c[1],'\n @',copy.deepcopy(cls.env)
nuevo_env = copy.deepcopy(cls.env)
for n in comparar:
extend(nuevo_env,valor(n[0]),n[1])
print 'NE ',nuevo_env
#print '#',nuevo_env
return eval(c[1],extend(nuevo_env,str(valor(c[0])),nodo))
#print ' @@ ',eval(c[1],extend(cls.env,str(valor(c[0])),nodo))
#return eval(c[1],extend(cls.env,str(valor(c[0])),valor(nodo)))
#print 'retorno',valor(c[1])
else : return eval(c[1],extend(copy.deepcopy(cls.env),str(valor(c[0])),nodo))
raise ParametrosError('Error de matching')
# OLD APPLY (previous version, left commented out)
# global num_clausura
# #if isinstance(nodo1,Nodo.Nodo) and isinstance(nodo2,Nodo.Nodo): print 'APPLY',nodo1.type,nodo2.type
# #print 'APPLY\n -',nodo1,'\n -',nodo2
# #if nodo2 is None and nodo1 is None: return
# if 'clausura' in env:
# #print 'here'#, valor(env['clausura'][0][0]),valor(env['clausura'][1][0])
# #print 'APPLY',nodo1,nodo2
# #print env
# #i=555
# for c in env['clausura']:
# print '+C0\n +',nodo2,'\n +',c[0],'\n +',c[1],'\n +',env['clausura'][0][1]
# if match(nodo2,c[0]):
# print 'Macheo \n *',c[1],'\n *',extend(env,str(valor(c[0])),valor(nodo2))
# print valor(eval(c[1],extend(env,str(valor(c[0])),valor(nodo2))))
# return #eval(c[1],extend(env,str(valor(c[0])),valor(nodo2)))
# # else: return False
# # i+=111
# #print 'ERROR',c[0],nodo2
# #n = c[0]
# #print n.type, nodo2.type
# #while isinstance(n,Nodo.Nodo):
# # print n.type
# # n = n.izquierdo
# raise 'AA'
# else:
# #print 'aqui \n ',nodo1,'\n ',nodo2,'\n ' ,env
# #print '1zzz'
# #print 'ZZ', eval(nodo1,env)
# #print '2zzz'
# return apply(eval(nodo1,env),eval(nodo2,env),env)
# #return apply(eval(nodo2,eval(nodo2,env)),env
# Build the closure of a function
def clausura(nodo,env,temp):
if isinstance(nodo,Nodo.Nodo):
if nodo.type == 'lfe':
#print 'in lfe',nodo.izquierdo,nodo.derecho
temp.append((nodo.izquierdo,nodo.derecho))
clausura(nodo.izquierdo,env,temp)
clausura(nodo.derecho,env,temp)
# print '$$$\n',CLS.CLS(env,temp),'\n$$$'
return CLS.CLS(env,temp)
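# The resulting CLS value pairs the defining environment with the list of
# (pattern, expression) clauses collected from the 'lfe' nodes.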
# Collect the patterns of a pattern list
def patrones(nodo,listap):
if isinstance(nodo,Nodo.Nodo):
#print nodo
if nodo.type == 'PATRON':
#print 'p',nodo
listap.append(nodo)
if isinstance(nodo.izquierdo,Nodo.Nodo):
patrones(nodo.izquierdo,listap)
if isinstance(nodo.derecho,Nodo.Nodo):
patrones(nodo.derecho,listap)
return listap
# Collect the body (pattern lists and expressions)
# of a function
def cuerpo(nodo,body):
#print 'body',body, nodo.type
if isinstance(nodo,Nodo.Nodo):
if nodo.type == 'lfe':
#print 'in lfe',nodo.izquierdo,nodo.derecho
body.append((patrones(nodo.izquierdo,[]),nodo.derecho))
cuerpo(nodo.izquierdo,body)
cuerpo(nodo.derecho,body)
# print '$$$\n',CLS.CLS(env,temp),'\n$$$'
return body
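# body is a list of (pattern-list, expression) pairs, one per 'lfe' node,
# i.e. one per equation of the function definition.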
# Factor a function by grouping mutually matching patterns
def factorizar(body):
conjunto = []
particion = {}
clave = {}
exp = {}
p = 0
q = 1
for b in body:
print b[0][0]
#exp.append((b[0][0],b[1]))
conjunto.append(b[0][0])
p = 0
q = 1
print 'len' ,len(conjunto)
	while p < len(conjunto):
		q = p + 1
		while q < len(conjunto):
			if match(conjunto[p],conjunto[q]) and match(conjunto[q],conjunto[p]):
print 'conjunto',conjunto[p],conjunto[q],p,q
if p in clave:
if clave[p] in particion:
particion[clave[p]].append(conjunto[q])
else:
particion[clave[p]] = [conjunto[q]]
if clave[p] in exp:
exp[clave[p]].append(body[p][1])
exp[clave[p]].append(body[q][1])
else:
exp[clave[p]] = [body[p][1]]
exp[clave[p]].append(body[q][1])
clave[q] = p
clave[p] = q
elif q in clave:
if clave[q] in particion:
particion[clave[q]].append(conjunto[p])
else:
particion[clave[q]] = [conjunto[p]]
if clave[q] in exp:
exp[clave[q]].append(body[p][1])
exp[clave[q]].append(body[q][1])
else:
exp[clave[q]] = [body[p][1]]
exp[clave[q]].append(body[q][1])
clave[p] = q
clave[q] = p
else:
particion[q] = [conjunto[q]]
					exp[q] = [body[p][1], body[q][1]]
clave[q] = p
clave[p] = p
else:
if p not in clave:
clave[p] = p
particion[p] = [conjunto[p]]
if q not in clave:
clave[q] = q
particion[q] = [conjunto[q]]
q += 1
p +=1
print particion , exp #particion[0][0] ,particion[2][0]#particion[3][0
# Type predicates
def es_entero(x,y):
if isinstance(x,int) and isinstance(y,int) and not(isinstance(x,bool)) and not(isinstance(y,bool)):
return True
else:
return False
def es_booleano(x,y):
if isinstance(x,bool) and isinstance(y,bool):
return True
else:
return False
# Type errors are raised as ParametrosError (defined in the excepcion module)
def eval(nodo,env):
# if isinstance(nodo,Nodo.Nodo):
# if isinstance(nodo.izquierdo,Nodo.Nodo):
# if isinstance(nodo.derecho,Nodo.Nodo):
# print nodo.type,'\n I: ', nodo.izquierdo.type,'\n D: ',nodo.derecho.type
# else:
# print nodo.type,'\n I: ', nodo.izquierdo.type
# else: print nodo.typee
try:
if not isinstance(nodo,Nodo.Nodo): return nodo
#if nodo.type == 'lp' or nodo.type == 'arg' or nodo.type == 'arg2': return eval(nodo.izquierdo,env)
if nodo.type == 'arg':
#apply(nodo.izquierdo,nodo.derecho,env)
#print 'Doble \n ',nodo.izquierdo,'\n ',nodo.derecho
eval(nodo.izquierdo,env)
eval(nodo.derecho,env)
#apply(eval(nodo.izquierdo,env),eval(nodo.derecho,env))
#print 'Doble2 \n ',nodo.izquierdo,'\n ',nodo.derecho
#if nodo.type == 'lp' or nodo.type == 'arg2': return eval(nodo.izquierdo,env)
if nodo.type == 'arg2': return eval(nodo.izquierdo,env)
if nodo.type == 'lp':return nodo
elif nodo.type == 'FUN':
#print 'In-Fun\n', cuerpo(nodo,[])
cuerpo_fun = cuerpo(nodo,[])
if len(cuerpo_fun[0][0]) != 1:
				fun_factorizada = factorizar(cuerpo_fun)
else:
return clausura(nodo,env,[])
#return eval(nodo.izquierdo,env)
#elif nodo.type == 'LISTAPATRON': return eval(nodo.izquierdo,env)
		elif nodo.type == 'IF':
			if valor(eval(nodo.izquierdo.izquierdo,env)) == True:
				return eval(nodo.izquierdo.derecho,env)
			else:
				return eval(nodo.derecho,env)
elif nodo.type == 'LISTAPATRON' or nodo.type == 'LISTA': return nodo
elif nodo.type == 'no_terminal': return eval(nodo.izquierdo,env)
elif nodo.type == 'sub': return eval(nodo.izquierdo,env)
elif nodo.type == '': return eval(nodo.izquierdo,env)
#elif nodo.type == 'CONSTANTE': return nodo.izquierdo.izquierdo
elif nodo.type == 'CONSTANTE' or nodo.type == 'ENTERO':
#print 'kkk',nodo.type
return nodo
elif nodo.type == 'CONSTLV':
#print nodo.izquierdo
#return '[]'
#print nodo.type
return nodo
elif nodo.type == 'MAS' :
#print 'nodos \n', nodo.izquierdo, nodo.derecho
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
#if es_entero(i,d):
resultado = i + d
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))
#else: raise ParametrosError('Error de tipo en los parametros de la suma')
elif nodo.type == 'MENOS' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_entero(i,d):
resultado = i - d
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))
else: raise ParametrosError('Error de tipo en los parametros de la resta')
elif nodo.type == 'NEGATIVO' :
i = valor(eval(nodo.izquierdo,env))
if es_entero(i,1):
resultado = -i
return Nodo.Nodo('NEGATIVO',resultado)
else: raise ParametrosError('Error de tipo en el parametro de negativo')
elif nodo.type == 'PRODUCTO' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
#if es_entero(i,d):
resultado = i * d
#if es_entero(valor(eval(nodo.izquierdo,env)),valor(eval(nodo.derecho,env))):
#resultado = valor(eval(nodo.izquierdo,env)) * valor(eval(nodo.derecho,env))
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))
#else: raise ParametrosError('Error de tipo en los parametros del producto')
		elif nodo.type == 'COCIENTE' :
			i = valor(eval(nodo.izquierdo,env))
			d = valor(eval(nodo.derecho,env))
			#if es_entero(i,d):
			if (d == 0):
				raise ParametrosError('Error: Division por cero')
			resultado = i / d
			return Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))
			#else: raise ParametrosError('Error de tipo de los parametros de la division')
elif nodo.type == 'MENOR' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_entero(i,d):
resultado = (i < d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))
else: raise ParametrosError('Error de tipo en los parametros de: <')
elif nodo.type == 'MENOROIGUAL' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_entero(i,d):
resultado = (i <= d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))
else: raise ParametrosError('Error de tipo en los parametros de: =<')
elif nodo.type == 'MAYOR' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_entero(i,d):
resultado = (i > d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))
else: raise ParametrosError('Error de tipo en los parametros de: >')
elif nodo.type == 'MAYOROIGUAL' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_entero(i,d):
resultado = (i >= d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))
else: raise ParametrosError('Error de tipo en los parametros de: >=')
elif nodo.type == 'IGUAL' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
#print str(isinstance(i,str)) + ' instancia'
if es_entero(i,d) or es_booleano(i,d):
resultado = (i == d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))
else: raise ParametrosError('Error de tipo en los parametros de: =')
elif nodo.type == 'DISTINTO' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_entero(i,d) or es_booleano(i,d):
resultado = (i != d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))
else: raise ParametrosError('Error de tipo en los parametros de: <>')
elif nodo.type == 'NO' :
i = valor(eval(nodo.izquierdo,env))
if es_booleano(bool(i),True):
resultado = not(i)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))
			else: raise ParametrosError('Error de tipo en la negacion')
elif nodo.type == 'OR' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_booleano(i,d):
resultado = (i or d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))
			else: raise ParametrosError('Error de tipo en los parametros del OR')
elif nodo.type == 'AND' :
i = valor(eval(nodo.izquierdo,env))
d = valor(eval(nodo.derecho,env))
if es_booleano(i,d):
resultado = (i and d)
return Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))
			else: raise ParametrosError('Error de tipo en los parametros del AND')
elif nodo.type == 'VARIABLE':
#print 'Pepe',env
#if 'clausura' in env:
#print 'pepe'
#for c in env['clausura']:
#print '+C0\n +',nodo2,'\n +',c[0]#,'\n +',env['clausura']
# if match(nodo,c[0]):
#print 'Macheo',nodo2,c[0]
# print 'aaa', c[1]
# return c[1]
#else:
return lookup(str(valor(nodo.izquierdo)),env)
#return eval(lookup(str(valor(nodo.izquierdo)),env),env)
#elif nodo.type == 'PATRON': return eval(nodo.izquierdo,env)
elif nodo.type == 'PATRON': return nodo
elif nodo.type == 'LET':
#valor_patron = str(nodo.izquierdo.izquierdo.izquierdo.izquierdo.izquierdo)
#env = extend(env,valor_patron,nodo.izquierdo.derecho)
p = nodo.izquierdo.izquierdo
e1 = nodo.izquierdo.derecho
e2 = nodo.derecho
env1 = extend(env,p,'fake')
v1 = eval(e1,env1)
return eval(e2,replace(env1,str(valor(p)),v1))
elif nodo.type == 'lfe':
#print 'LFE \n ===>', nodo.derecho
#if 'clausura' in env:
#extend(env,'clausura',env['clausura']+[(nodo.izquierdo,nodo.derecho)])
#print 'a'
#else:
#extend(env,'clausura',[(nodo.izquierdo,nodo.derecho)])
#print 'b'
#print'ENV', env, nodo
return
elif nodo.type == 'APLICAR':
#print 'APLICAR',nodo.izquierdo,nodo.derecho
#apply(nodo.izquierdo,nodo.derecho,env)
return apply(eval(nodo.izquierdo,env),eval(nodo.derecho,env))
except ParametrosError, messag:
messag = messag.messg
print 'ERROR : ' + messag
|
normal
|
{
"blob_id": "1178ad09638a4822461f7394e6cabb2db9516053",
"index": 9664,
"step-1": "#! /usr/bin/python\nimport Nodo,CLS,copy,sys\nfrom excepcion import *\nsys.setrecursionlimit(100000)\n# Match\ndef match(nodo1,nodo2):\n\t#print 'MATCH\\n -',nodo1,'\\n -',nodo2\n#\tif isinstance(nodo1,Nodo.Nodo) and isinstance(nodo2,Nodo.Nodo):\n#\t\tprint ' -',nodo1.type,'\\n -',nodo2.type\n\t# Fin de recursion\n\tif (not isinstance(nodo1,Nodo.Nodo)) and (not isinstance(nodo2,Nodo.Nodo)):\n\t\t#print '- ',nodo1,'\\n- ',nodo2\n\t\treturn nodo1 == nodo2\n\n\t# Bajar si el nodo es no_terminal\n\tif nodo1.type == 'no_terminal' or nodo1.type == '' or nodo1.type == 'PATRON' or nodo1.type == 'sub' or nodo1.type == 'LISTAPATRON' or nodo1.type == 'lp': return match(nodo1.izquierdo,nodo2)\n\tif nodo2.type == 'no_terminal' or nodo2.type == '' or nodo2.type == 'PATRON' or nodo2.type == 'sub' or nodo2.type == 'LISTAPATRON' or nodo2.type == 'lp': return match(nodo2.izquierdo,nodo1)\n\n\t# Variables hacen match con todo\n\tif nodo1.type == 'VARIABLE' or nodo2.type == 'VARIABLE':\n\t\t#print '- Variable\\n -'\n\t\treturn True\n\n\t# Constantes\n\tif nodo1.type == 'CONSTANTE' and nodo2.type == 'CONSTANTE':\n\t\t#print '- Constante\\n -'\n\t\t#print '(',nodo1.type,' ',nodo2.type,')\\n'\n\t\treturn match(nodo1.izquierdo.izquierdo,nodo2.izquierdo.izquierdo)\n\n\t# Entero\n\tif nodo1.type == 'ENTERO' and nodo2.type == 'ENTERO':\n\t\t#print '- Entero\\n -'\n\t\treturn match(nodo1.izquierdo,nodo2.izquierdo)\n\n\t# Booleano\n\tif nodo1.type == 'BOOLEANO' and nodo2.type == 'BOOLEANO':\n\t#\tprint '- Booleano\\n -'\n\t\treturn match(nodo1.izquierdo,nodo2.izquierdo)\n\n\t# Listavacia\n\tif nodo1.type == 'CONSTLV' and nodo2.type == 'CONSTLV':\n\t\t#return match(nodo1.izquierdo,nodo2.izquierdo)\n\t\treturn True\n\n\t# Listas\n\tif nodo1.type == 'LISTA' and nodo2.type == 'LISTA':\n\t\t#print 'BLAH',nodo1.izquierdo,nodo2.izquierdo,nodo1.derecho,nodo2.derecho\n\t\t#print match(nodo1.izquierdo,nodo2.izquierdo) and match(nodo1.derecho,nodo2.derecho)\n\t\t#return match(nodo1.izquierdo,nodo2.izquierdo) and match(nodo1.derecho,nodo2.derecho)\n\t\treturn comparar_listas(nodo1,nodo2,[])\n#\tprint 'falso' \n\n\treturn False\n\n# Comparar Listas\ndef comparar_listas(lista1,lista2,tuplas):\n\tprint 'incomp-tuplas: ',tuplas\n\tif match(lista1.izquierdo,lista2.izquierdo):\n\t\ttuplas.append((lista1.izquierdo,lista2.izquierdo))\n\t\td1 = lista1.derecho\n\t\td2 = lista2.derecho\n\t\tif d1.type == 'LISTA':\n\t\t\tif d2.type == 'LISTA':\n\t\t\t\tcomparar_listas(lista1.derecho,lista2.derecho,tuplas)\n\t\t\telse: \n\t\t\t\tif match(d1,d2): tuplas.append((d1,d2))\n\t\telif d2.type == 'LISTA':\n\t\t\t\tif match(d1,d2): tuplas.append((d1,d2))\n\t\telse:\n\t\t\tif match(d1,d2): tuplas.append((d1,d2))\n\t\treturn tuplas\n\t\t\t\n\telse: return False\n\t\n\n\n# Replace\ndef replace(diccionario,clave,valor):\n\tdiccionario[clave] = valor\n\treturn diccionario\n\n# Extend\ndef extend(diccionario,clave,valor):\n\tdiccionario[clave] = valor\n\treturn diccionario\n\n# Lookup\ndef lookup(clave,diccionario):\n\ttry:\n\t\tif clave in diccionario:\n\t\t\treturn diccionario[clave]\n\t\telse: raise ParametrosError('Variable '+str(clave)+' no declarada')\n\texcept ParametrosError, messag:\n\t\tmessag = messag.messg\n\t\tprint 'Error : ' + messag\n\t\t\t\n\n# Eval\ndef eval(nodo,env,orientacion):\n\tif orientacion == 'izquierda': return eval(nodo.izquierdo,env)\n\treturn eval(nodo.derecho,env)\n\n# Valor\ndef valor(nodo):\n\twhile isinstance(nodo,Nodo.Nodo):\n\t\tif nodo.type == 'BOOLEANO':\n\t\t\tif nodo.izquierdo == 'TRUE': return 
True\n\t\t\telse: return False\n\t\telif nodo.type != 'LISTA':\n\t\t\tnodo = nodo.izquierdo\n\t\telse:\n\t\t\treturn str(valor(nodo.izquierdo))+'::'+str(valor(nodo.derecho))\n\treturn nodo\n\n# Cantidad de patrones de una Funcion\ndef cantidad_patrones(nodo):\n\twhile (nodo.type != 'lp'):\n\t\tnodo = nodo.izquierdo\n\tglobal suma\n\tsuma = 0\n\ttam_listapatron(nodo)\n\treturn suma\n\n# # Tamano de una lista de patrones\n# def tam_listapatron(nodo):\n# \tglobal suma\n# \ti = nodo.izquierdo\n# \td = nodo.derecho\n# \tif nodo.type == 'PATRON':\n# \t\tsuma += 1\n# \t\treturn\n# \telse:\n# \t\tif isinstance(i,Nodo.Nodo):\n# \t\t\ttam_listapatron(i)\n# \t\tif isinstance(d,Nodo.Nodo):\n# \t\t\ttam_listapatron(d)\n# \treturn suma\n\t\n\t\n# Apply\ndef apply(cls,nodo):\n\tfor c in cls.clausura:\n\t\t#print 'C[0]\\n =',valor(nodo)\n\t\tcomparar = match(c[0],nodo)\n\t\tprint 'comp', comparar\n\t\tif comparar:\n\t\t\tif isinstance(comparar,list):\n\t\t\t\t#print 'Matcheo de listas', comparar[0],comparar[1] \n\t\t\t\t#print 'APPLY\\n @',cls,'\\n @',c[1],'\\n @',copy.deepcopy(cls.env)\n\t\t\t\tnuevo_env = copy.deepcopy(cls.env)\n\t\t\t\tfor n in comparar:\n\t\t\t\t\textend(nuevo_env,valor(n[0]),n[1])\n\t\t\t\tprint 'NE ',nuevo_env\n\t\t\t\t#print '#',nuevo_env\n\t\t\t\treturn eval(c[1],extend(nuevo_env,str(valor(c[0])),nodo))\n\t\t\t#print ' @@ ',eval(c[1],extend(cls.env,str(valor(c[0])),nodo))\n#return eval(c[1],extend(cls.env,str(valor(c[0])),valor(nodo)))\n\t\t\t#print 'retorno',valor(c[1])\n\t\t\telse : return eval(c[1],extend(copy.deepcopy(cls.env),str(valor(c[0])),nodo))\n\traise ParametrosError('Error de matching')\n\n#APPLY VIEJO\n# \tglobal num_clausura\n# \t#if isinstance(nodo1,Nodo.Nodo) and isinstance(nodo2,Nodo.Nodo): print 'APPLY',nodo1.type,nodo2.type\n# \t#print 'APPLY\\n -',nodo1,'\\n -',nodo2\n# \t#if nodo2 is None and nodo1 is None: return\n# \tif 'clausura' in env:\n# \t\t#print 'here'#, valor(env['clausura'][0][0]),valor(env['clausura'][1][0])\n# \t\t#print 'APPLY',nodo1,nodo2\n# \t\t#print env\n# \t\t#i=555\n# \t\tfor c in env['clausura']:\n# \t\t\tprint '+C0\\n +',nodo2,'\\n +',c[0],'\\n +',c[1],'\\n +',env['clausura'][0][1]\n# \t\t\tif match(nodo2,c[0]):\n# \t\t\t\tprint 'Macheo \\n *',c[1],'\\n *',extend(env,str(valor(c[0])),valor(nodo2))\n# \t\t\t\tprint valor(eval(c[1],extend(env,str(valor(c[0])),valor(nodo2))))\n# \t\t\t\treturn #eval(c[1],extend(env,str(valor(c[0])),valor(nodo2)))\n# \t\t#\telse: return False\n# \t\t#\ti+=111\n# \t\t#print 'ERROR',c[0],nodo2\n# \t\t#n = c[0]\n# \t\t#print n.type, nodo2.type\n# \t\t#while isinstance(n,Nodo.Nodo):\n# \t\t#\tprint n.type\n# \t\t#\tn = n.izquierdo\n# \t\traise 'AA'\n# \telse:\n# \t\t#print 'aqui \\n ',nodo1,'\\n ',nodo2,'\\n ' ,env\n# \t\t#print '1zzz'\n# \t\t#print 'ZZ', eval(nodo1,env)\n# \t\t#print '2zzz'\n# \t\treturn apply(eval(nodo1,env),eval(nodo2,env),env)\n# \t\t#return apply(eval(nodo2,eval(nodo2,env)),env\n\t\n# Obtener clausura de una funcion\ndef clausura(nodo,env,temp):\n\tif isinstance(nodo,Nodo.Nodo):\n\t\tif nodo.type == 'lfe':\n\t\t\t#print 'in lfe',nodo.izquierdo,nodo.derecho\n\t\t\ttemp.append((nodo.izquierdo,nodo.derecho))\n\t\tclausura(nodo.izquierdo,env,temp)\n\t\tclausura(nodo.derecho,env,temp)\n#\t\tprint '$$$\\n',CLS.CLS(env,temp),'\\n$$$'\n\treturn CLS.CLS(env,temp)\n\n\n# Obtener patrones de una lista de patrones\ndef patrones(nodo,listap):\n\tif isinstance(nodo,Nodo.Nodo):\n\t\t#print nodo\n\t\tif nodo.type == 'PATRON':\n\t\t\t#print 'p',nodo\n\t\t\tlistap.append(nodo)\n\t\tif 
isinstance(nodo.izquierdo,Nodo.Nodo):\n\t\t\tpatrones(nodo.izquierdo,listap)\n\t\tif isinstance(nodo.derecho,Nodo.Nodo):\n\t\t\tpatrones(nodo.derecho,listap)\n\treturn listap\n\n# Obtener cuerpo (listas de patrones y expresiones)\n# de una funcion\ndef cuerpo(nodo,body):\n\t#print 'body',body, nodo.type\n\tif isinstance(nodo,Nodo.Nodo):\n\t\tif nodo.type == 'lfe':\n\t\t\t#print 'in lfe',nodo.izquierdo,nodo.derecho\n\t\t\tbody.append((patrones(nodo.izquierdo,[]),nodo.derecho))\n\t\tcuerpo(nodo.izquierdo,body)\n\t\tcuerpo(nodo.derecho,body)\n#\t\tprint '$$$\\n',CLS.CLS(env,temp),'\\n$$$'\n\treturn body\n\n\n# Factorizar funcion\ndef factorizar(body):\n\tconjunto = []\n\tparticion = {}\n\tclave = {}\n\texp = {}\n\tp = 0\n\tq = 1\n\tfor b in body:\n\t\tprint b[0][0]\n\t\t#exp.append((b[0][0],b[1]))\n\t\tconjunto.append(b[0][0])\n\tp = 0\n\tq = 1\n\tprint 'len' ,len(conjunto)\n\twhile p < len(conjunto):\n\t\twhile q < len(conjunto):\n\t\t\tif match(conjunto[p],conjunto[q]) and match (conjunto[q],conjunto[p]):\n\t\t\t\tprint 'conjunto',conjunto[p],conjunto[q],p,q\n\t\t\t\tif p in clave:\n\t\t\t\t\tif clave[p] in particion:\n\t\t\t\t\t\tparticion[clave[p]].append(conjunto[q])\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tparticion[clave[p]] = [conjunto[q]]\n\t\t\t\t\tif clave[p] in exp:\n\t\t\t\t\t\texp[clave[p]].append(body[p][1])\n\t\t\t\t\t\texp[clave[p]].append(body[q][1])\n\t\t\t\t\telse:\n\t\t\t\t\t\texp[clave[p]] = [body[p][1]]\n\t\t\t\t\t\texp[clave[p]].append(body[q][1])\n\t\t\t\t\tclave[q] = p\n\t\t\t\t\tclave[p] = q\n\n\t\t\t\telif q in clave:\n\t\t\t\t\tif clave[q] in particion:\n\t\t\t\t\t\tparticion[clave[q]].append(conjunto[p])\n\t\t\t\t\telse:\n\t\t\t\t\t\tparticion[clave[q]] = [conjunto[p]]\n\t\t\t\t\tif clave[q] in exp:\n\t\t\t\t\t\texp[clave[q]].append(body[p][1])\n\t\t\t\t\t\texp[clave[q]].append(body[q][1])\n\t\t\t\t\telse:\n\t\t\t\t\t\texp[clave[q]] = [body[p][1]]\n\t\t\t\t\t\texp[clave[q]].append(body[q][1])\n\t\t\t\t\tclave[p] = q\n\t\t\t\t\tclave[q] = p\n \t\t\t\telse:\n\t\t\t\t\tparticion[q] = [conjunto[q]]\n\t\t\t\t\texp[q]\n\t\t\t\t\tclave[q] = p\n\t\t\t\t\tclave[p] = p\n\t\t\telse:\n\t\t\t\tif p not in clave:\n\t\t\t\t\tclave[p] = p\n\t\t\t\t\tparticion[p] = [conjunto[p]]\n\t\t\t\tif q not in clave:\n\t\t\t\t\tclave[q] = q\n\t\t\t\t\tparticion[q] = [conjunto[q]]\n\t\t\tq += 1\n\t\tp +=1\n\tprint particion , exp #particion[0][0] ,particion[2][0]#particion[3][0\n\t\t\t\t\t\n\t\n\n# Eval\ndef es_entero(x,y):\n\tif isinstance(x,int) and isinstance(y,int) and not(isinstance(x,bool)) and not(isinstance(y,bool)):\n\t\treturn True\n\telse:\n\t\treturn False \ndef es_booleano(x,y):\n\tif isinstance(x,bool) and isinstance(y,bool):\n\t\treturn True\n\telse:\n\t\treturn False\n#definicion de excepcion: Error de tipo\n\n\n\ndef eval(nodo,env):\n#\tif isinstance(nodo,Nodo.Nodo):\n#\t\tif isinstance(nodo.izquierdo,Nodo.Nodo):\n#\t\t\tif isinstance(nodo.derecho,Nodo.Nodo):\n#\t\t\t\tprint nodo.type,'\\n I: ', nodo.izquierdo.type,'\\n D: ',nodo.derecho.type\n#\t\t\telse:\n#\t\t\t\tprint nodo.type,'\\n I: ', nodo.izquierdo.type\n#\t\telse: print nodo.typee\n\ttry:\n\t\n\t\tif not isinstance(nodo,Nodo.Nodo): return nodo\n\t\t#if nodo.type == 'lp' or nodo.type == 'arg' or nodo.type == 'arg2': return eval(nodo.izquierdo,env)\n\t\tif nodo.type == 'arg': \n\t\t\t#apply(nodo.izquierdo,nodo.derecho,env)\n\t\t\t#print 'Doble \\n ',nodo.izquierdo,'\\n 
',nodo.derecho\n\t\t\teval(nodo.izquierdo,env)\n\t\t\teval(nodo.derecho,env)\n\t\t\t#apply(eval(nodo.izquierdo,env),eval(nodo.derecho,env))\n\t\t\t#print 'Doble2 \\n ',nodo.izquierdo,'\\n ',nodo.derecho\n\t\t#if nodo.type == 'lp' or nodo.type == 'arg2': return eval(nodo.izquierdo,env)\n\t\tif nodo.type == 'arg2': return eval(nodo.izquierdo,env)\n\t\tif nodo.type == 'lp':return nodo\n\t\telif nodo.type == 'FUN': \n\t\t\t#print 'In-Fun\\n', cuerpo(nodo,[])\n\t\t\tcuerpo_fun = cuerpo(nodo,[])\n\t\t\tif len(cuerpo_fun[0][0]) != 1:\n\t\t\t\t#factorizado = factorizar(cuerpo_fun)\n\t\t\t\n\t\t\t\tfun_factorizada = factorizar(nodo)\n\t\t\telse:\n\t\t\t\treturn clausura(nodo,env,[])\n\t\t\t#return eval(nodo.izquierdo,env)\n\t\t#elif nodo.type == 'LISTAPATRON': return eval(nodo.izquierdo,env)\n\t\telif nodo.type == 'IF':\n # print 'if'\n # print nodo.izquierdo.izquierdo.type\n # print nodo.izquierdo.izquierdo\n # print valor(eval(nodo.izquierdo.izquierdo,env))\n if valor(eval(nodo.izquierdo.izquierdo ,env)) == True:\n #print 'Hola'\n return eval(nodo.izquierdo.derecho,env)\n else:\n return eval(nodo.derecho,env)\n\t\telif nodo.type == 'LISTAPATRON' or nodo.type == 'LISTA': return nodo\n\t\telif nodo.type == 'no_terminal': return eval(nodo.izquierdo,env)\n\t\telif nodo.type == 'sub': return eval(nodo.izquierdo,env)\n\t\telif nodo.type == '': return eval(nodo.izquierdo,env)\n\t\t#elif nodo.type == 'CONSTANTE': return nodo.izquierdo.izquierdo\n\t\telif nodo.type == 'CONSTANTE' or nodo.type == 'ENTERO':\n\t\t\t#print 'kkk',nodo.type\n\t\t\treturn nodo\n\t\telif nodo.type == 'CONSTLV': \n\t\t\t#print nodo.izquierdo\n\t\t\t#return '[]'\n\t\t\t#print nodo.type\n\t\t\treturn nodo\n\t\telif nodo.type == 'MAS' :\n\t\t\t#print 'nodos \\n', nodo.izquierdo, nodo.derecho\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\t#if es_entero(i,d):\n\t\t\tresultado = i + d\n\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))\n\t\t\t#else: raise ParametrosError('Error de tipo en los parametros de la suma') \n\t\t\n\t\telif nodo.type == 'MENOS' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_entero(i,d):\n\t\t\t\tresultado = i - d\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))\n\t\t\telse: raise ParametrosError('Error de tipo en los parametros de la resta') \n\n\t\telif nodo.type == 'NEGATIVO' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\tif es_entero(i,1):\t\t\n\t\t\t\tresultado = -i\n\t\t\t\treturn Nodo.Nodo('NEGATIVO',resultado)\n\t\t\telse: raise ParametrosError('Error de tipo en el parametro de negativo')\n\t\t\n\t\t\n\t\telif nodo.type == 'PRODUCTO' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\t#if es_entero(i,d):\n\t\t\tresultado = i * d\n\t\t\t#if es_entero(valor(eval(nodo.izquierdo,env)),valor(eval(nodo.derecho,env))):\n\t\t\t\t#resultado = valor(eval(nodo.izquierdo,env)) * valor(eval(nodo.derecho,env))\n\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))\n\t\t\t#else: raise ParametrosError('Error de tipo en los parametros del producto') \n\t\telif nodo.type == 'COCIENTE' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))#except ParametrosError, messag:\n\t#\tmessag = messag.messg\n\t#\tprint 'Error : ' + messag\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\t#if es_entero(i,d):\n\t\t\tif (d == 0):\n\t\t\t \traise ParametrosError('Error: Division por cero') \n\t\t\t\t#else:\t\t\t\t\n\t\t\t\tresultado = i / d\n\t\t\t\treturn 
Nodo.Nodo('CONSTANTE',Nodo.Nodo('ENTERO',resultado))\n\t\t\t#else: raise ParametrosError('Error de tipo de los parametros de la division') \n\t\telif nodo.type == 'MENOR' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_entero(i,d):\n\t\t\t\tresultado = (i < d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))\n\t\t\telse: raise ParametrosError('Error de tipo en los parametros de: <') \n\t\t\n\t\telif nodo.type == 'MENOROIGUAL' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_entero(i,d):\n\t\t\t\tresultado = (i <= d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))\n\t\t\telse: raise ParametrosError('Error de tipo en los parametros de: =<')\n\t\telif nodo.type == 'MAYOR' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_entero(i,d):\n\t\t\t\tresultado = (i > d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))\n\t\t\telse: raise ParametrosError('Error de tipo en los parametros de: >')\n\t\telif nodo.type == 'MAYOROIGUAL' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_entero(i,d):\n\t\t\t\tresultado = (i >= d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',resultado))\n\t\t\telse: raise ParametrosError('Error de tipo en los parametros de: >=')\n\t\telif nodo.type == 'IGUAL' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\t#print str(isinstance(i,str)) + ' instancia'\n\t\t\tif es_entero(i,d) or es_booleano(i,d):\n\t\t\t\tresultado = (i == d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))\n\t\t\telse: raise ParametrosError('Error de tipo en los parametros de: =')\n\t\t\n\t\telif nodo.type == 'DISTINTO' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_entero(i,d) or es_booleano(i,d):\n\t\t\t\tresultado = (i != d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))\n\t\t\telse: raise ParametrosError('Error de tipo en los parametros de: <>')\n\t\telif nodo.type == 'NO' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\tif es_booleano(bool(i),True):\t\t\n\t\t\t\tresultado = not(i)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))\n\t\t\telse: raise 'ERROR: de tipo en la negacion'\n\t\telif nodo.type == 'OR' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_booleano(i,d):\n\t\t\t\tresultado = (i or d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))\n\t\t\telse: raise 'ERROR: de tipo en los parametros del OR'\n\t\telif nodo.type == 'AND' :\n\t\t\ti = valor(eval(nodo.izquierdo,env))\n\t\t\td = valor(eval(nodo.derecho,env))\n\t\t\tif es_booleano(i,d):\n\t\t\t\tresultado = (i and d)\n\t\t\t\treturn Nodo.Nodo('CONSTANTE',Nodo.Nodo('BOOLEANO',str(resultado).upper()))\n\t\t\telse: raise 'ERROR: de tipo en los parametros del AND'\n\t\telif nodo.type == 'VARIABLE':\n\t\t\t#print 'Pepe',env\n\t\t\t#if 'clausura' in env:\n\t\t\t\t#print 'pepe'\n\t\t\t\t#for c in env['clausura']:\n\t\t\t\t#print '+C0\\n +',nodo2,'\\n +',c[0]#,'\\n +',env['clausura']\n\t\t\t\t#\tif match(nodo,c[0]):\n\t\t\t\t\t#print 'Macheo',nodo2,c[0]\n\t\t\t\t#\t\tprint 'aaa', c[1]\n\t\t\t\t#\t\treturn c[1]\n\t\t\t#else:\n\t\t\treturn lookup(str(valor(nodo.izquierdo)),env)\t\n\t\t\t#return 
eval(lookup(str(valor(nodo.izquierdo)),env),env)\n\t\t#elif nodo.type == 'PATRON': return eval(nodo.izquierdo,env)\n\t\telif nodo.type == 'PATRON': return nodo\n\t\telif nodo.type == 'LET':\n\t\t\t#valor_patron = str(nodo.izquierdo.izquierdo.izquierdo.izquierdo.izquierdo)\n\t\t\t#env = extend(env,valor_patron,nodo.izquierdo.derecho)\n\t\t\tp = nodo.izquierdo.izquierdo\n\t\t\te1 = nodo.izquierdo.derecho\n\t\t\te2 = nodo.derecho\n\t\t\tenv1 = extend(env,p,'fake')\n\t\t\tv1 = eval(e1,env1)\n\t\t\treturn eval(e2,replace(env1,str(valor(p)),v1))\n\t\telif nodo.type == 'lfe':\n\t\t\t#print 'LFE \\n ===>', nodo.derecho\n\t\t\t#if 'clausura' in env:\n\t\t\t\t#extend(env,'clausura',env['clausura']+[(nodo.izquierdo,nodo.derecho)])\n\t\t\t\t#print 'a'\n\t\t\t#else:\n\t\t\t\t#extend(env,'clausura',[(nodo.izquierdo,nodo.derecho)])\n\t\t\t\t#print 'b'\n\t\t\t#print'ENV', env, nodo\n\t\t\treturn\n\t\telif nodo.type == 'APLICAR':\n\t\t\t#print 'APLICAR',nodo.izquierdo,nodo.derecho\n\t\t\t#apply(nodo.izquierdo,nodo.derecho,env)\n\t\t\treturn apply(eval(nodo.izquierdo,env),eval(nodo.derecho,env))\n\texcept ParametrosError, messag:\n\t\tmessag = messag.messg\n\t\tprint 'ERROR : ' + messag\n\n\n\n\t\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import urllib.request
def get_html(url):
"""
Returns the html of url or None if status code is not 200
"""
req = urllib.request.Request(
url,
headers={
'User-Agent': 'Python Learning Program',
'From': '[email protected]'
}
)
resp = urllib.request.urlopen(req)
if resp.code == 200:
return resp.read() # returns the html document
else:
return None
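

# Minimal usage sketch; the URL below is a placeholder, not from the original.
if __name__ == '__main__':
    html = get_html('https://example.com')
    if html is not None:
        print(html[:200])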
|
normal
|
{
"blob_id": "4572e243f75ad92c04f5cdc0b454df7389183a6a",
"index": 3238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_html(url):\n \"\"\"\n Returns the html of url or None if status code is not 200\n \"\"\"\n req = urllib.request.Request(url, headers={'User-Agent':\n 'Python Learning Program', 'From': '[email protected]'})\n resp = urllib.request.urlopen(req)\n if resp.code == 200:\n return resp.read()\n else:\n return None\n",
"step-3": "import urllib.request\n\n\ndef get_html(url):\n \"\"\"\n Returns the html of url or None if status code is not 200\n \"\"\"\n req = urllib.request.Request(url, headers={'User-Agent':\n 'Python Learning Program', 'From': '[email protected]'})\n resp = urllib.request.urlopen(req)\n if resp.code == 200:\n return resp.read()\n else:\n return None\n",
"step-4": "import urllib.request\n\n\ndef get_html(url):\n \"\"\"\n Returns the html of url or None if status code is not 200\n \"\"\"\n req = urllib.request.Request(\n url,\n headers={\n 'User-Agent': 'Python Learning Program',\n 'From': '[email protected]'\n }\n )\n resp = urllib.request.urlopen(req)\n\n if resp.code == 200:\n return resp.read() # returns the html document\n else:\n return None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if numero % 2 == 0:
p = numero
print(p, 'é um número par')
else:
i = numero
print(i, 'é um número ímpar')
<|reserved_special_token_1|>
p = 0
i = 0
numero = int(input('Insira um número: '))
if numero % 2 == 0:
p = numero
print(p, 'é um número par')
else:
i = numero
print(i, 'é um número ímpar')
<|reserved_special_token_1|>
#função: Definir se o número inserido é ímpar ou par
#autor: João Cândido
p = 0
i = 0
numero = int(input("Insira um número: "))
if numero % 2 == 0:
p = numero
print (p, "é um número par")
else:
i = numero
print (i, "é um número ímpar")
|
flexible
|
{
"blob_id": "382bc321c5fd35682bc735ca4d6e293d09be64ec",
"index": 9990,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif numero % 2 == 0:\n p = numero\n print(p, 'é um número par')\nelse:\n i = numero\n print(i, 'é um número ímpar')\n",
"step-3": "p = 0\ni = 0\nnumero = int(input('Insira um número: '))\nif numero % 2 == 0:\n p = numero\n print(p, 'é um número par')\nelse:\n i = numero\n print(i, 'é um número ímpar')\n",
"step-4": "#função: Definir se o número inserido é ímpar ou par\n#autor: João Cândido\n\np = 0\ni = 0\n\nnumero = int(input(\"Insira um número: \"))\n\nif numero % 2 == 0:\n\tp = numero\n\tprint (p, \"é um número par\")\nelse:\n\ti = numero\n\tprint (i, \"é um número ímpar\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# This simulation obtains the dose on a cylindrical phantom at various
# distances from a Co60 gamma point source. Dose in microsieverts per hour
# is computed and plotted as a function of distance.
# The model is built to have a human tissue and human height and volume which
# is typically referred to as a phantom.
# source details based on https://file.scirp.org/pdf/OJMSi_2014011414370625.pdf
import openmc
import math
import matplotlib.pyplot as plt
# Tissue Equivalent, MS20 from PNNL
mat_tissue = openmc.Material()
mat_tissue.add_element("O", 0.079013)
mat_tissue.add_element("C", 0.32948)
mat_tissue.add_element("H", 0.546359)
mat_tissue.add_element("N", 0.008619)
mat_tissue.add_element("Mg", 0.036358)
mat_tissue.add_element("Cl", 0.000172)
mat_tissue.set_density("g/cm3", 1.0)
mat_air = openmc.Material()
mat_air.add_element("C", 0.00015)
mat_air.add_element("N", 0.784431)
mat_air.add_element("O", 0.210748)
mat_air.add_element("Ar", 0.004671)
mat_air.set_density("g/cm3", 0.001205)
my_materials = openmc.Materials([mat_tissue, mat_air])
all_dose = []
distances_to_simulate = [50, 1000, 2000, 4000, 6000]
for distance_from_source in distances_to_simulate: # units of cm
# representing a human as a cylindrical phantom
# average human is 62,000cm3 volume
# average human height = 169.75
# resulting cylinder radius = 10.782
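    # i.e. r = sqrt(V / (pi * h)) = sqrt(62000 / (pi * 169.75)) ~= 10.782 cm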
cylinder_surface = openmc.ZCylinder(r=10.782, x0=distance_from_source)
phantom_upper_surface = openmc.ZPlane(z0=169.75)
phantom_lower_surface = openmc.ZPlane(z0=0)
outer_surface = openmc.Sphere(r=10000000, boundary_type="vacuum")
phantom_region = -cylinder_surface & -phantom_upper_surface & +phantom_lower_surface
# void region is below the outer surface and not the phantom region
void_region = -outer_surface & ~phantom_region
void_cell = openmc.Cell(region=void_region)
void_cell.fill = mat_air
phantom_cell = openmc.Cell(region=phantom_region)
phantom_cell.fill = mat_tissue
my_geometry = openmc.Geometry([phantom_cell, void_cell])
# Instantiate a Settings object
my_settings = openmc.Settings()
my_settings.output = {"tallies": False}
my_settings.batches = 2
my_settings.inactive = 0
my_settings.particles = 500000
my_settings.photon_transport = True
my_settings.run_mode = "fixed source"
# Create a gamma point source
source = openmc.Source()
source.space = openmc.stats.Point((0, 0, 0))
source.angle = openmc.stats.Isotropic()
# This is a Co60 source, see the task on sources to understand it
source.energy = openmc.stats.Discrete([1.1732e6, 1.3325e6], [0.5, 0.5])
source.particle = "photon"
my_settings.source = source
# volume of cylinder V=πr^2h
# openmc native units for length are cm so volume is in cm3
phantom_volume = math.pi * math.pow(10.782, 2) * 169.75
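    # sanity check: pi * 10.782**2 * 169.75 ~= 62,000 cm3, the average human volume noted above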
# geometry argument refers to irradiation direction
# https://academic.oup.com/view-large/figure/119655666/ncx112f01.png
energy_bins_p, dose_coeffs_p = openmc.data.dose_coefficients(
particle="photon", geometry="AP"
)
energy_function_filter_p = openmc.EnergyFunctionFilter(energy_bins_p, dose_coeffs_p)
energy_function_filter_p.interpolation = "cubic" # cubic interpolation is recommended by ICRP
photon_particle_filter = openmc.ParticleFilter("photon")
cell_filter = openmc.CellFilter(phantom_cell)
# Create tally to score dose
dose_cell_tally = openmc.Tally(name="photon_dose_on_cell")
# note that the EnergyFunctionFilter is included as a filter
dose_cell_tally.filters = [
cell_filter,
photon_particle_filter,
energy_function_filter_p,
]
dose_cell_tally.scores = ["flux"]
my_tallies = openmc.Tallies([dose_cell_tally])
model = openmc.Model(my_geometry, my_materials, my_settings, my_tallies)
statepoint_filename = model.run()
with openmc.StatePoint(statepoint_filename) as statepoint:
photon_tally_result = statepoint.get_tally(
name="photon_dose_on_cell"
).mean.flatten()[0]
photons_per_second = 740000000000 # units of photons per second
# converts units from pSv-cm3/source_photon to pSv-cm3/second
dose = photon_tally_result * photons_per_second
# converts from pSv-cm3/second to pSv/second
dose = dose / phantom_volume
# converts from (pico) pSv/second to (micro) uSv/second
dose = dose * 1e-6
# converts from uSv/second to uSv/hour
dose = dose * 60 * 60
all_dose.append(dose)
plt.plot(distances_to_simulate, all_dose, label="dose on phantom")
plt.xlabel("Distance between photon source and phantom")
plt.ylabel("Dose [uSv per hour]")
plt.title("Dose on a phantom as a function of distance from a Co60 source\n")
plt.yscale("log")
plt.grid(True)
plt.show()
|
normal
|
{
"blob_id": "28bf11cb4205dd186b84cc7b7c8b9009f35fe408",
"index": 7415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmat_tissue.add_element('O', 0.079013)\nmat_tissue.add_element('C', 0.32948)\nmat_tissue.add_element('H', 0.546359)\nmat_tissue.add_element('N', 0.008619)\nmat_tissue.add_element('Mg', 0.036358)\nmat_tissue.add_element('Cl', 0.000172)\nmat_tissue.set_density('g/cm3', 1.0)\n<mask token>\nmat_air.add_element('C', 0.00015)\nmat_air.add_element('N', 0.784431)\nmat_air.add_element('O', 0.210748)\nmat_air.add_element('Ar', 0.004671)\nmat_air.set_density('g/cm3', 0.001205)\n<mask token>\nfor distance_from_source in distances_to_simulate:\n cylinder_surface = openmc.ZCylinder(r=10.782, x0=distance_from_source)\n phantom_upper_surface = openmc.ZPlane(z0=169.75)\n phantom_lower_surface = openmc.ZPlane(z0=0)\n outer_surface = openmc.Sphere(r=10000000, boundary_type='vacuum')\n phantom_region = (-cylinder_surface & -phantom_upper_surface & +\n phantom_lower_surface)\n void_region = -outer_surface & ~phantom_region\n void_cell = openmc.Cell(region=void_region)\n void_cell.fill = mat_air\n phantom_cell = openmc.Cell(region=phantom_region)\n phantom_cell.fill = mat_tissue\n my_geometry = openmc.Geometry([phantom_cell, void_cell])\n my_settings = openmc.Settings()\n my_settings.output = {'tallies': False}\n my_settings.batches = 2\n my_settings.inactive = 0\n my_settings.particles = 500000\n my_settings.photon_transport = True\n my_settings.run_mode = 'fixed source'\n source = openmc.Source()\n source.space = openmc.stats.Point((0, 0, 0))\n source.angle = openmc.stats.Isotropic()\n source.energy = openmc.stats.Discrete([1173200.0, 1332500.0], [0.5, 0.5])\n source.particle = 'photon'\n my_settings.source = source\n phantom_volume = math.pi * math.pow(10.782, 2) * 169.75\n energy_bins_p, dose_coeffs_p = openmc.data.dose_coefficients(particle=\n 'photon', geometry='AP')\n energy_function_filter_p = openmc.EnergyFunctionFilter(energy_bins_p,\n dose_coeffs_p)\n energy_function_filter_p.interpolation = 'cubic'\n photon_particle_filter = openmc.ParticleFilter('photon')\n cell_filter = openmc.CellFilter(phantom_cell)\n dose_cell_tally = openmc.Tally(name='photon_dose_on_cell')\n dose_cell_tally.filters = [cell_filter, photon_particle_filter,\n energy_function_filter_p]\n dose_cell_tally.scores = ['flux']\n my_tallies = openmc.Tallies([dose_cell_tally])\n model = openmc.Model(my_geometry, my_materials, my_settings, my_tallies)\n statepoint_filename = model.run()\n with openmc.StatePoint(statepoint_filename) as statepoint:\n photon_tally_result = statepoint.get_tally(name='photon_dose_on_cell'\n ).mean.flatten()[0]\n photons_per_second = 740000000000\n dose = photon_tally_result * photons_per_second\n dose = dose / phantom_volume\n dose = dose * 1e-06\n dose = dose * 60 * 60\n all_dose.append(dose)\nplt.plot(distances_to_simulate, all_dose, label='dose on phantom')\nplt.xlabel('Distance between photon source and phantom')\nplt.ylabel('Dose [uSv per hour]')\nplt.title('Dose on a phantom as a function of distance from a Co60 source\\n')\nplt.yscale('log')\nplt.grid(True)\nplt.show()\n",
"step-3": "<mask token>\nmat_tissue = openmc.Material()\nmat_tissue.add_element('O', 0.079013)\nmat_tissue.add_element('C', 0.32948)\nmat_tissue.add_element('H', 0.546359)\nmat_tissue.add_element('N', 0.008619)\nmat_tissue.add_element('Mg', 0.036358)\nmat_tissue.add_element('Cl', 0.000172)\nmat_tissue.set_density('g/cm3', 1.0)\nmat_air = openmc.Material()\nmat_air.add_element('C', 0.00015)\nmat_air.add_element('N', 0.784431)\nmat_air.add_element('O', 0.210748)\nmat_air.add_element('Ar', 0.004671)\nmat_air.set_density('g/cm3', 0.001205)\nmy_materials = openmc.Materials([mat_tissue, mat_air])\nall_dose = []\ndistances_to_simulate = [50, 1000, 2000, 4000, 6000]\nfor distance_from_source in distances_to_simulate:\n cylinder_surface = openmc.ZCylinder(r=10.782, x0=distance_from_source)\n phantom_upper_surface = openmc.ZPlane(z0=169.75)\n phantom_lower_surface = openmc.ZPlane(z0=0)\n outer_surface = openmc.Sphere(r=10000000, boundary_type='vacuum')\n phantom_region = (-cylinder_surface & -phantom_upper_surface & +\n phantom_lower_surface)\n void_region = -outer_surface & ~phantom_region\n void_cell = openmc.Cell(region=void_region)\n void_cell.fill = mat_air\n phantom_cell = openmc.Cell(region=phantom_region)\n phantom_cell.fill = mat_tissue\n my_geometry = openmc.Geometry([phantom_cell, void_cell])\n my_settings = openmc.Settings()\n my_settings.output = {'tallies': False}\n my_settings.batches = 2\n my_settings.inactive = 0\n my_settings.particles = 500000\n my_settings.photon_transport = True\n my_settings.run_mode = 'fixed source'\n source = openmc.Source()\n source.space = openmc.stats.Point((0, 0, 0))\n source.angle = openmc.stats.Isotropic()\n source.energy = openmc.stats.Discrete([1173200.0, 1332500.0], [0.5, 0.5])\n source.particle = 'photon'\n my_settings.source = source\n phantom_volume = math.pi * math.pow(10.782, 2) * 169.75\n energy_bins_p, dose_coeffs_p = openmc.data.dose_coefficients(particle=\n 'photon', geometry='AP')\n energy_function_filter_p = openmc.EnergyFunctionFilter(energy_bins_p,\n dose_coeffs_p)\n energy_function_filter_p.interpolation = 'cubic'\n photon_particle_filter = openmc.ParticleFilter('photon')\n cell_filter = openmc.CellFilter(phantom_cell)\n dose_cell_tally = openmc.Tally(name='photon_dose_on_cell')\n dose_cell_tally.filters = [cell_filter, photon_particle_filter,\n energy_function_filter_p]\n dose_cell_tally.scores = ['flux']\n my_tallies = openmc.Tallies([dose_cell_tally])\n model = openmc.Model(my_geometry, my_materials, my_settings, my_tallies)\n statepoint_filename = model.run()\n with openmc.StatePoint(statepoint_filename) as statepoint:\n photon_tally_result = statepoint.get_tally(name='photon_dose_on_cell'\n ).mean.flatten()[0]\n photons_per_second = 740000000000\n dose = photon_tally_result * photons_per_second\n dose = dose / phantom_volume\n dose = dose * 1e-06\n dose = dose * 60 * 60\n all_dose.append(dose)\nplt.plot(distances_to_simulate, all_dose, label='dose on phantom')\nplt.xlabel('Distance between photon source and phantom')\nplt.ylabel('Dose [uSv per hour]')\nplt.title('Dose on a phantom as a function of distance from a Co60 source\\n')\nplt.yscale('log')\nplt.grid(True)\nplt.show()\n",
"step-4": "import openmc\nimport math\nimport matplotlib.pyplot as plt\nmat_tissue = openmc.Material()\nmat_tissue.add_element('O', 0.079013)\nmat_tissue.add_element('C', 0.32948)\nmat_tissue.add_element('H', 0.546359)\nmat_tissue.add_element('N', 0.008619)\nmat_tissue.add_element('Mg', 0.036358)\nmat_tissue.add_element('Cl', 0.000172)\nmat_tissue.set_density('g/cm3', 1.0)\nmat_air = openmc.Material()\nmat_air.add_element('C', 0.00015)\nmat_air.add_element('N', 0.784431)\nmat_air.add_element('O', 0.210748)\nmat_air.add_element('Ar', 0.004671)\nmat_air.set_density('g/cm3', 0.001205)\nmy_materials = openmc.Materials([mat_tissue, mat_air])\nall_dose = []\ndistances_to_simulate = [50, 1000, 2000, 4000, 6000]\nfor distance_from_source in distances_to_simulate:\n cylinder_surface = openmc.ZCylinder(r=10.782, x0=distance_from_source)\n phantom_upper_surface = openmc.ZPlane(z0=169.75)\n phantom_lower_surface = openmc.ZPlane(z0=0)\n outer_surface = openmc.Sphere(r=10000000, boundary_type='vacuum')\n phantom_region = (-cylinder_surface & -phantom_upper_surface & +\n phantom_lower_surface)\n void_region = -outer_surface & ~phantom_region\n void_cell = openmc.Cell(region=void_region)\n void_cell.fill = mat_air\n phantom_cell = openmc.Cell(region=phantom_region)\n phantom_cell.fill = mat_tissue\n my_geometry = openmc.Geometry([phantom_cell, void_cell])\n my_settings = openmc.Settings()\n my_settings.output = {'tallies': False}\n my_settings.batches = 2\n my_settings.inactive = 0\n my_settings.particles = 500000\n my_settings.photon_transport = True\n my_settings.run_mode = 'fixed source'\n source = openmc.Source()\n source.space = openmc.stats.Point((0, 0, 0))\n source.angle = openmc.stats.Isotropic()\n source.energy = openmc.stats.Discrete([1173200.0, 1332500.0], [0.5, 0.5])\n source.particle = 'photon'\n my_settings.source = source\n phantom_volume = math.pi * math.pow(10.782, 2) * 169.75\n energy_bins_p, dose_coeffs_p = openmc.data.dose_coefficients(particle=\n 'photon', geometry='AP')\n energy_function_filter_p = openmc.EnergyFunctionFilter(energy_bins_p,\n dose_coeffs_p)\n energy_function_filter_p.interpolation = 'cubic'\n photon_particle_filter = openmc.ParticleFilter('photon')\n cell_filter = openmc.CellFilter(phantom_cell)\n dose_cell_tally = openmc.Tally(name='photon_dose_on_cell')\n dose_cell_tally.filters = [cell_filter, photon_particle_filter,\n energy_function_filter_p]\n dose_cell_tally.scores = ['flux']\n my_tallies = openmc.Tallies([dose_cell_tally])\n model = openmc.Model(my_geometry, my_materials, my_settings, my_tallies)\n statepoint_filename = model.run()\n with openmc.StatePoint(statepoint_filename) as statepoint:\n photon_tally_result = statepoint.get_tally(name='photon_dose_on_cell'\n ).mean.flatten()[0]\n photons_per_second = 740000000000\n dose = photon_tally_result * photons_per_second\n dose = dose / phantom_volume\n dose = dose * 1e-06\n dose = dose * 60 * 60\n all_dose.append(dose)\nplt.plot(distances_to_simulate, all_dose, label='dose on phantom')\nplt.xlabel('Distance between photon source and phantom')\nplt.ylabel('Dose [uSv per hour]')\nplt.title('Dose on a phantom as a function of distance from a Co60 source\\n')\nplt.yscale('log')\nplt.grid(True)\nplt.show()\n",
"step-5": "# This simulation obtains dose on a cylindical disk phantom at various\n# distances from a 14MeV photon source. Dose in millisieverts is found\n# and compared to the yearly limit\n\n# The model is built to have a human tissue and human height and volume which\n# is typically referred to as a phantom.\n\n# source details based on https://file.scirp.org/pdf/OJMSi_2014011414370625.pdf\n\nimport openmc\nimport math\nimport matplotlib.pyplot as plt\n\n\n# Tissue Equivalent, MS20 from PNNL\nmat_tissue = openmc.Material()\nmat_tissue.add_element(\"O\", 0.079013)\nmat_tissue.add_element(\"C\", 0.32948)\nmat_tissue.add_element(\"H\", 0.546359)\nmat_tissue.add_element(\"N\", 0.008619)\nmat_tissue.add_element(\"Mg\", 0.036358)\nmat_tissue.add_element(\"Cl\", 0.000172)\nmat_tissue.set_density(\"g/cm3\", 1.0)\n\nmat_air = openmc.Material()\nmat_air.add_element(\"C\", 0.00015)\nmat_air.add_element(\"N\", 0.784431)\nmat_air.add_element(\"O\", 0.210748)\nmat_air.add_element(\"Ar\", 0.004671)\nmat_air.set_density(\"g/cm3\", 0.001205)\n\nmy_materials = openmc.Materials([mat_tissue, mat_air])\n\nall_dose = []\ndistances_to_simulate = [50, 1000, 2000, 4000, 6000]\nfor distance_from_source in distances_to_simulate: # units of cm\n\n # representing a human as a cylindrical phantom\n # average human is 62,000cm3 volume\n # average human height = 169.75\n # resulting cylinder radius = 10.782\n cylinder_surface = openmc.ZCylinder(r=10.782, x0=distance_from_source)\n phantom_upper_surface = openmc.ZPlane(z0=169.75)\n phantom_lower_surface = openmc.ZPlane(z0=0)\n\n outer_surface = openmc.Sphere(r=10000000, boundary_type=\"vacuum\")\n\n phantom_region = -cylinder_surface & -phantom_upper_surface & +phantom_lower_surface\n\n # void region is below the outer surface and not the phantom region\n void_region = -outer_surface & ~phantom_region\n\n void_cell = openmc.Cell(region=void_region)\n void_cell.fill = mat_air\n phantom_cell = openmc.Cell(region=phantom_region)\n phantom_cell.fill = mat_tissue\n\n my_geometry = openmc.Geometry([phantom_cell, void_cell])\n\n # Instantiate a Settings object\n my_settings = openmc.Settings()\n my_settings.output = {\"tallies\": False}\n my_settings.batches = 2\n my_settings.inactive = 0\n my_settings.particles = 500000\n my_settings.photon_transport = True\n my_settings.run_mode = \"fixed source\"\n\n # Create a gamma point source\n source = openmc.Source()\n source.space = openmc.stats.Point((0, 0, 0))\n source.angle = openmc.stats.Isotropic()\n # This is a Co60 source, see the task on sources to understand it\n source.energy = openmc.stats.Discrete([1.1732e6, 1.3325e6], [0.5, 0.5])\n source.particle = \"photon\"\n\n my_settings.source = source\n\n # volume of cylinder V=πr^2h\n # openmc native units for length are cm so volume is in cm3\n phantom_volume = math.pi * math.pow(10.782, 2) * 169.75\n\n # geometry argument refers to irradiation direction\n # https://academic.oup.com/view-large/figure/119655666/ncx112f01.png\n energy_bins_p, dose_coeffs_p = openmc.data.dose_coefficients(\n particle=\"photon\", geometry=\"AP\"\n )\n energy_function_filter_p = openmc.EnergyFunctionFilter(energy_bins_p, dose_coeffs_p)\n energy_function_filter_p.interpolation = \"cubic\" # cubic interpolation is recommended by ICRP\n\n photon_particle_filter = openmc.ParticleFilter(\"photon\")\n cell_filter = openmc.CellFilter(phantom_cell)\n\n # Create tally to score dose\n dose_cell_tally = openmc.Tally(name=\"photon_dose_on_cell\")\n # note that the EnergyFunctionFilter is included as a filter\n 
dose_cell_tally.filters = [\n cell_filter,\n photon_particle_filter,\n energy_function_filter_p,\n ]\n dose_cell_tally.scores = [\"flux\"]\n my_tallies = openmc.Tallies([dose_cell_tally])\n\n model = openmc.Model(my_geometry, my_materials, my_settings, my_tallies)\n\n statepoint_filename = model.run()\n\n with openmc.StatePoint(statepoint_filename) as statepoint:\n\n photon_tally_result = statepoint.get_tally(\n name=\"photon_dose_on_cell\"\n ).mean.flatten()[0]\n\n photons_per_second = 740000000000 # units of photons per second\n\n # converts units from pSv-cm3/source_photon to pSv-cm3/second\n dose = photon_tally_result * photons_per_second\n\n # converts from pSv-cm3/second to pSv/second\n dose = dose / phantom_volume\n\n # converts from (pico) pSv/second to (micro) uSv/second\n dose = dose * 1e-6\n\n # converts from uSv/second to uSv/hour\n dose = dose * 60 * 60\n\n all_dose.append(dose)\n\nplt.plot(distances_to_simulate, all_dose, label=\"dose on phantom\")\nplt.xlabel(\"Distance between photon source and phantom\")\nplt.ylabel(\"Dose [uSv per hour]\")\nplt.title(\"Dose on a phantom as a function of distance from a Co60 source\\n\")\nplt.yscale(\"log\")\nplt.grid(True)\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Tile(pygame.sprite.Sprite):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, sprite, x, y, surface):
super().__init__()
self.image = pygame.image.load(sprite).convert_alpha()
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.parentSurface = surface
self.parentSurface.tileGroup.add(self)
def update(self):
self.parentSurface.surface.blit(self.image, (self.x, self.y))
class Dirt(Tile):
def __init__(self, x, y, surface):
spriteVariant = randint(1, 3)
super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,
y, surface)
class Air(Tile):
def __init__(self, x, y, surface):
super().__init__('./assets/air.png', x, y, surface)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TileSurface:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.surface = pygame.Surface((width, height))
def updatePos(self, x, y):
        self.x = x
        self.y = y
def generateTiles(self):
tiles = []
x = 0
y = 368
for i in range(0, 150):
row = []
for j in range(0, 150):
newTile = Dirt(x, y, self)
newTile.rect.x = x
newTile.rect.y = y
row.append(newTile)
x += 16
x = 0
y += 16
tiles.append(row)
self.tileGrid = tiles
def drawTiles(self):
for i in range(0, len(self.tileGrid)):
for j in range(0, len(self.tileGrid[i])):
self.tileGrid[i][j].update()
class Tile(pygame.sprite.Sprite):
x = 0
y = 0
def __init__(self, sprite, x, y, surface):
super().__init__()
self.image = pygame.image.load(sprite).convert_alpha()
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.parentSurface = surface
self.parentSurface.tileGroup.add(self)
def update(self):
self.parentSurface.surface.blit(self.image, (self.x, self.y))
class Dirt(Tile):
def __init__(self, x, y, surface):
spriteVariant = randint(1, 3)
super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,
y, surface)
class Air(Tile):
def __init__(self, x, y, surface):
super().__init__('./assets/air.png', x, y, surface)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TileSurface:
tileGroup = pygame.sprite.Group()
tileGrid = []
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.surface = pygame.Surface((width, height))
def updatePos(self, x, y):
        self.x = x
        self.y = y
def generateTiles(self):
tiles = []
x = 0
y = 368
for i in range(0, 150):
row = []
for j in range(0, 150):
newTile = Dirt(x, y, self)
newTile.rect.x = x
newTile.rect.y = y
row.append(newTile)
x += 16
x = 0
y += 16
tiles.append(row)
self.tileGrid = tiles
def drawTiles(self):
for i in range(0, len(self.tileGrid)):
for j in range(0, len(self.tileGrid[i])):
self.tileGrid[i][j].update()
class Tile(pygame.sprite.Sprite):
x = 0
y = 0
def __init__(self, sprite, x, y, surface):
super().__init__()
self.image = pygame.image.load(sprite).convert_alpha()
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.parentSurface = surface
self.parentSurface.tileGroup.add(self)
def update(self):
self.parentSurface.surface.blit(self.image, (self.x, self.y))
class Dirt(Tile):
def __init__(self, x, y, surface):
spriteVariant = randint(1, 3)
super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,
y, surface)
class Air(Tile):
def __init__(self, x, y, surface):
super().__init__('./assets/air.png', x, y, surface)
<|reserved_special_token_1|>
import pygame
import utils
from random import randint
class TileSurface:
tileGroup = pygame.sprite.Group()
tileGrid = []
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.surface = pygame.Surface((width, height))
def updatePos(self, x, y):
        self.x = x
        self.y = y
def generateTiles(self):
tiles = []
x = 0
y = 368
for i in range(0, 150):
row = []
for j in range(0, 150):
newTile = Dirt(x, y, self)
newTile.rect.x = x
newTile.rect.y = y
row.append(newTile)
x += 16
x = 0
y += 16
tiles.append(row)
self.tileGrid = tiles
def drawTiles(self):
for i in range(0, len(self.tileGrid)):
for j in range(0, len(self.tileGrid[i])):
self.tileGrid[i][j].update()
class Tile(pygame.sprite.Sprite):
x = 0
y = 0
def __init__(self, sprite, x, y, surface):
super().__init__()
self.image = pygame.image.load(sprite).convert_alpha()
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.parentSurface = surface
self.parentSurface.tileGroup.add(self)
def update(self):
self.parentSurface.surface.blit(self.image, (self.x, self.y))
class Dirt(Tile):
def __init__(self, x, y, surface):
spriteVariant = randint(1, 3)
super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,
y, surface)
class Air(Tile):
def __init__(self, x, y, surface):
super().__init__('./assets/air.png', x, y, surface)
<|reserved_special_token_1|>
import pygame
import utils
from random import randint
class TileSurface():
tileGroup = pygame.sprite.Group()
tileGrid = []
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.surface = pygame.Surface((width, height))
def updatePos(self, x, y):
		self.x = x
		self.y = y
def generateTiles(self):
tiles = []
x = 0
y = 368
for i in range(0, 150):
row = []
for j in range(0, 150):
newTile = Dirt(x, y, self)
newTile.rect.x = x
newTile.rect.y = y
row.append(newTile)
x += 16
x = 0
y += 16
tiles.append(row)
self.tileGrid = tiles
def drawTiles(self):
for i in range(0, len(self.tileGrid)):
for j in range(0, len(self.tileGrid[i])):
self.tileGrid[i][j].update()
class Tile(pygame.sprite.Sprite):
x = 0
y = 0
def __init__(self, sprite, x, y, surface):
# Call pygame sprite init method
super().__init__()
self.image = pygame.image.load(sprite).convert_alpha() #load a sprite image
self.rect = self.image.get_rect() # set collision rectangle
self.x = x
self.y = y
self.parentSurface = surface
self.parentSurface.tileGroup.add(self)
def update(self):
self.parentSurface.surface.blit(self.image, (self.x, self.y))
class Dirt(Tile):
def __init__(self, x, y, surface):
spriteVariant = randint(1, 3)
super().__init__("./assets/dirt0" + str(spriteVariant) + ".png", x, y, surface)
class Air(Tile):
def __init__(self, x, y, surface):
super().__init__("./assets/air.png", x, y, surface)
|
flexible
|
{
"blob_id": "0c8eb90c1d8a58f54186a30ce98a67310955a367",
"index": 3024,
"step-1": "<mask token>\n\n\nclass Tile(pygame.sprite.Sprite):\n <mask token>\n <mask token>\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n",
"step-2": "<mask token>\n\n\nclass TileSurface:\n <mask token>\n <mask token>\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.surface = pygame.Surface((width, height))\n\n def updatePos(self, x, y):\n self.x = self.x\n self.y = self.y\n\n def generateTiles(self):\n tiles = []\n x = 0\n y = 368\n for i in range(0, 150):\n row = []\n for j in range(0, 150):\n newTile = Dirt(x, y, self)\n newTile.rect.x = x\n newTile.rect.y = y\n row.append(newTile)\n x += 16\n x = 0\n y += 16\n tiles.append(row)\n self.tileGrid = tiles\n\n def drawTiles(self):\n for i in range(0, len(self.tileGrid)):\n for j in range(0, len(self.tileGrid[i])):\n self.tileGrid[i][j].update()\n\n\nclass Tile(pygame.sprite.Sprite):\n x = 0\n y = 0\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n",
"step-3": "<mask token>\n\n\nclass TileSurface:\n tileGroup = pygame.sprite.Group()\n tileGrid = []\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.surface = pygame.Surface((width, height))\n\n def updatePos(self, x, y):\n self.x = self.x\n self.y = self.y\n\n def generateTiles(self):\n tiles = []\n x = 0\n y = 368\n for i in range(0, 150):\n row = []\n for j in range(0, 150):\n newTile = Dirt(x, y, self)\n newTile.rect.x = x\n newTile.rect.y = y\n row.append(newTile)\n x += 16\n x = 0\n y += 16\n tiles.append(row)\n self.tileGrid = tiles\n\n def drawTiles(self):\n for i in range(0, len(self.tileGrid)):\n for j in range(0, len(self.tileGrid[i])):\n self.tileGrid[i][j].update()\n\n\nclass Tile(pygame.sprite.Sprite):\n x = 0\n y = 0\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n",
"step-4": "import pygame\nimport utils\nfrom random import randint\n\n\nclass TileSurface:\n tileGroup = pygame.sprite.Group()\n tileGrid = []\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.surface = pygame.Surface((width, height))\n\n def updatePos(self, x, y):\n self.x = self.x\n self.y = self.y\n\n def generateTiles(self):\n tiles = []\n x = 0\n y = 368\n for i in range(0, 150):\n row = []\n for j in range(0, 150):\n newTile = Dirt(x, y, self)\n newTile.rect.x = x\n newTile.rect.y = y\n row.append(newTile)\n x += 16\n x = 0\n y += 16\n tiles.append(row)\n self.tileGrid = tiles\n\n def drawTiles(self):\n for i in range(0, len(self.tileGrid)):\n for j in range(0, len(self.tileGrid[i])):\n self.tileGrid[i][j].update()\n\n\nclass Tile(pygame.sprite.Sprite):\n x = 0\n y = 0\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n",
"step-5": "import pygame\nimport utils\nfrom random import randint\n\nclass TileSurface():\n\n\ttileGroup = pygame.sprite.Group()\n\n\ttileGrid = []\n\n\tdef __init__(self, x, y, width, height):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.surface = pygame.Surface((width, height))\n\n\tdef updatePos(self, x, y):\n\t\tself.x = self.x\n\t\tself.y = self.y\n\n\tdef generateTiles(self):\n\t\ttiles = []\n\t\tx = 0\n\t\ty = 368\n\n\t\tfor i in range(0, 150):\n\t\t\trow = []\n\t\t\tfor j in range(0, 150):\n\t\t\t\tnewTile = Dirt(x, y, self)\n\t\t\t\tnewTile.rect.x = x\n\t\t\t\tnewTile.rect.y = y\n\t\t\t\trow.append(newTile)\n\t\t\t\tx += 16\n\t\t\tx = 0\n\t\t\ty += 16\n\t\t\ttiles.append(row)\n\n\t\tself.tileGrid = tiles\n\n\tdef drawTiles(self):\n\t\tfor i in range(0, len(self.tileGrid)):\n\t\t\tfor j in range(0, len(self.tileGrid[i])):\n\t\t\t\tself.tileGrid[i][j].update()\n\n\n\n\nclass Tile(pygame.sprite.Sprite):\n\tx = 0\n\ty = 0\n\n\tdef __init__(self, sprite, x, y, surface):\n\t\t# Call pygame sprite init method\n\t\tsuper().__init__()\n\t\tself.image = pygame.image.load(sprite).convert_alpha() #load a sprite image\n\t\tself.rect = self.image.get_rect() # set collision rectangle\t\t\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.parentSurface = surface\n\n\t\tself.parentSurface.tileGroup.add(self)\n\n\tdef update(self):\n\t\tself.parentSurface.surface.blit(self.image, (self.x, self.y))\t\t\t\n\n\n\nclass Dirt(Tile):\n\tdef __init__(self, x, y, surface):\n\t\tspriteVariant = randint(1, 3)\n\t\tsuper().__init__(\"./assets/dirt0\" + str(spriteVariant) + \".png\", x, y, surface)\n\nclass Air(Tile):\n\tdef __init__(self, x, y, surface):\n\t\tsuper().__init__(\"./assets/air.png\", x, y, surface)",
"step-ids": [
7,
13,
14,
15,
16
]
}
|
[
7,
13,
14,
15,
16
] |
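A minimal usage sketch for the TileSurface/Tile classes in the record above. This example is editorial illustration, not part of the dataset row; it assumes pygame is installed, a display can be created, and the ./assets/dirt01.png to dirt03.png images referenced by Dirt exist.

import pygame
pygame.init()
screen = pygame.display.set_mode((800, 600))    # convert_alpha() in Tile needs an active display
world = TileSurface(0, 0, 2400, 2768)           # 150 tiles * 16 px wide; tile rows start at y=368
world.generateTiles()                           # fills tileGrid with randomly textured Dirt tiles
world.drawTiles()                               # blits every tile onto world.surface
screen.blit(world.surface, (world.x, world.y))
pygame.display.flip()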
<|reserved_special_token_0|>
def _get_site_name(f, i):
data_file = f + '\\' + 'new_desc_sele_data.csv'
site_name = pd.read_csv(data_file)['SITE_ID'][i]
return site_name
<|reserved_special_token_0|>
def _get_version_res_folder(f, version, site_name=None, i=None):
import os
version_folder = f + '\\' + version
if i:
site_name = _get_site_name(f, i)
elif site_name:
site_name = site_name
if os.path.exists(version_folder):
site_version_res_folder = version_folder + '\\' + site_name
if os.path.exists(site_version_res_folder):
return site_version_res_folder
else:
os.mkdir(site_version_res_folder)
return site_version_res_folder
def _get_site_folder(f, i=None, feature_name=None):
data_file = f + '\\' + 'new_desc_sele_data_origin.csv'
data_content = pd.read_csv(data_file)
print(feature_name)
if type(i) is int:
site_path = data_content['SITE_PATH'][i]
return site_path
elif type(feature_name) is str:
site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==
feature_name].values[0]
return site_path
else:
print('lack of index or feature_name.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _get_site_name(f, i):
data_file = f + '\\' + 'new_desc_sele_data.csv'
site_name = pd.read_csv(data_file)['SITE_ID'][i]
return site_name
def _get_site_DD_dataset_csv(f, i):
"""获取经过全部数据集(经过全部的特征选择)"""
site_path = _get_site_folder(f, i)
data_path = site_path + '\\data_confirm.csv'
data = pd.read_csv(data_path)
return data
<|reserved_special_token_0|>
def _get_version_res_folder(f, version, site_name=None, i=None):
import os
version_folder = f + '\\' + version
if i:
site_name = _get_site_name(f, i)
elif site_name:
site_name = site_name
if os.path.exists(version_folder):
site_version_res_folder = version_folder + '\\' + site_name
if os.path.exists(site_version_res_folder):
return site_version_res_folder
else:
os.mkdir(site_version_res_folder)
return site_version_res_folder
def _get_site_folder(f, i=None, feature_name=None):
data_file = f + '\\' + 'new_desc_sele_data_origin.csv'
data_content = pd.read_csv(data_file)
print(feature_name)
if type(i) is int:
site_path = data_content['SITE_PATH'][i]
return site_path
elif type(feature_name) is str:
site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==
feature_name].values[0]
return site_path
else:
print('lack of index or feature_name.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _get_site_name(f, i):
data_file = f + '\\' + 'new_desc_sele_data.csv'
site_name = pd.read_csv(data_file)['SITE_ID'][i]
return site_name
def _get_site_DD_dataset_csv(f, i):
"""获取经过全部数据集(经过全部的特征选择)"""
site_path = _get_site_folder(f, i)
data_path = site_path + '\\data_confirm.csv'
data = pd.read_csv(data_path)
return data
def _get_site_IGBP(f, i):
data_file = f + '\\' + 'new_desc_sele_data_origin.csv'
site_IGBP = pd.read_csv(data_file)['IGBP'][i]
return site_IGBP
def _get_site_feature_ale(f, i, feauture):
site_path = _get_site_folder(f, i)
prefix = 'ale_1_'
if type(feauture) is str:
ale_path = site_path + '\\' + prefix + feauture + '.csv'
ale_data = pd.read_csv(ale_path)
return ale_data
def _get_version_res_folder(f, version, site_name=None, i=None):
import os
version_folder = f + '\\' + version
if i:
site_name = _get_site_name(f, i)
elif site_name:
site_name = site_name
if os.path.exists(version_folder):
site_version_res_folder = version_folder + '\\' + site_name
if os.path.exists(site_version_res_folder):
return site_version_res_folder
else:
os.mkdir(site_version_res_folder)
return site_version_res_folder
def _get_site_folder(f, i=None, feature_name=None):
data_file = f + '\\' + 'new_desc_sele_data_origin.csv'
data_content = pd.read_csv(data_file)
print(feature_name)
if type(i) is int:
site_path = data_content['SITE_PATH'][i]
return site_path
elif type(feature_name) is str:
site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==
feature_name].values[0]
return site_path
else:
print('lack of index or feature_name.')
<|reserved_special_token_1|>
import pandas as pd
def _get_site_name(f, i):
data_file = f + '\\' + 'new_desc_sele_data.csv'
site_name = pd.read_csv(data_file)['SITE_ID'][i]
return site_name
def _get_site_DD_dataset_csv(f, i):
"""获取经过全部数据集(经过全部的特征选择)"""
site_path = _get_site_folder(f, i)
data_path = site_path + '\\data_confirm.csv'
data = pd.read_csv(data_path)
return data
def _get_site_IGBP(f, i):
data_file = f + '\\' + 'new_desc_sele_data_origin.csv'
site_IGBP = pd.read_csv(data_file)['IGBP'][i]
return site_IGBP
def _get_site_feature_ale(f, i, feauture):
site_path = _get_site_folder(f, i)
prefix = 'ale_1_'
if type(feauture) is str:
ale_path = site_path + '\\' + prefix + feauture + '.csv'
ale_data = pd.read_csv(ale_path)
return ale_data
def _get_version_res_folder(f, version, site_name=None, i=None):
import os
version_folder = f + '\\' + version
if i:
site_name = _get_site_name(f, i)
elif site_name:
site_name = site_name
if os.path.exists(version_folder):
site_version_res_folder = version_folder + '\\' + site_name
if os.path.exists(site_version_res_folder):
return site_version_res_folder
else:
os.mkdir(site_version_res_folder)
return site_version_res_folder
def _get_site_folder(f, i=None, feature_name=None):
data_file = f + '\\' + 'new_desc_sele_data_origin.csv'
data_content = pd.read_csv(data_file)
print(feature_name)
if type(i) is int:
site_path = data_content['SITE_PATH'][i]
return site_path
elif type(feature_name) is str:
site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==
feature_name].values[0]
return site_path
else:
print('lack of index or feature_name.')
<|reserved_special_token_1|>
import pandas as pd
def _get_site_name(f,i):
data_file = f +"\\"+"new_desc_sele_data.csv"
site_name=pd.read_csv(data_file)["SITE_ID"][i]
return site_name
def _get_site_DD_dataset_csv(f,i):
    '''Get the fully processed dataset (after all feature selection).'''
site_path=_get_site_folder(f,i)
data_path=site_path+"\\data_confirm.csv"
data=pd.read_csv(data_path)
return data
def _get_site_IGBP(f,i):
data_file = f +"\\"+"new_desc_sele_data_origin.csv"
site_IGBP=pd.read_csv(data_file)["IGBP"][i]
return site_IGBP
def _get_site_feature_ale(f,i,feauture):
site_path=_get_site_folder(f,i)
prefix="ale_1_"
if type(feauture) is str:
ale_path=site_path+"\\"+prefix+feauture+".csv"
ale_data=pd.read_csv(ale_path)
return ale_data
def _get_version_res_folder(f,version,site_name=None,i=None):
import os
version_folder=f+"\\"+version
if i:
site_name=_get_site_name(f,i)
elif site_name:
site_name = site_name
if os.path.exists(version_folder):
site_version_res_folder=version_folder+"\\"+site_name
if os.path.exists(site_version_res_folder):
return site_version_res_folder
else:
os.mkdir(site_version_res_folder)
return site_version_res_folder
def _get_site_folder(f,i=None,feature_name=None):
data_file = f + "\\" + "new_desc_sele_data_origin.csv"
data_content = pd.read_csv(data_file)
print(feature_name)
if type(i) is int:
site_path=data_content["SITE_PATH"][i]
return site_path
elif type(feature_name) is str:
site_path = data_content["SITE_PATH"][data_content["SITE_ID"]==feature_name].values[0]
return site_path
else:
print("lack of index or feature_name.")
|
flexible
|
{
"blob_id": "c034fba0b9204545b00ba972a17e63cf9c20854e",
"index": 3930,
"step-1": "<mask token>\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\n<mask token>\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-2": "<mask token>\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\ndef _get_site_DD_dataset_csv(f, i):\n \"\"\"获取经过全部数据集(经过全部的特征选择)\"\"\"\n site_path = _get_site_folder(f, i)\n data_path = site_path + '\\\\data_confirm.csv'\n data = pd.read_csv(data_path)\n return data\n\n\n<mask token>\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-3": "<mask token>\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\ndef _get_site_DD_dataset_csv(f, i):\n \"\"\"获取经过全部数据集(经过全部的特征选择)\"\"\"\n site_path = _get_site_folder(f, i)\n data_path = site_path + '\\\\data_confirm.csv'\n data = pd.read_csv(data_path)\n return data\n\n\ndef _get_site_IGBP(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n site_IGBP = pd.read_csv(data_file)['IGBP'][i]\n return site_IGBP\n\n\ndef _get_site_feature_ale(f, i, feauture):\n site_path = _get_site_folder(f, i)\n prefix = 'ale_1_'\n if type(feauture) is str:\n ale_path = site_path + '\\\\' + prefix + feauture + '.csv'\n ale_data = pd.read_csv(ale_path)\n return ale_data\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-4": "import pandas as pd\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\ndef _get_site_DD_dataset_csv(f, i):\n \"\"\"获取经过全部数据集(经过全部的特征选择)\"\"\"\n site_path = _get_site_folder(f, i)\n data_path = site_path + '\\\\data_confirm.csv'\n data = pd.read_csv(data_path)\n return data\n\n\ndef _get_site_IGBP(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n site_IGBP = pd.read_csv(data_file)['IGBP'][i]\n return site_IGBP\n\n\ndef _get_site_feature_ale(f, i, feauture):\n site_path = _get_site_folder(f, i)\n prefix = 'ale_1_'\n if type(feauture) is str:\n ale_path = site_path + '\\\\' + prefix + feauture + '.csv'\n ale_data = pd.read_csv(ale_path)\n return ale_data\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-5": "import pandas as pd\n\n\ndef _get_site_name(f,i):\n data_file = f +\"\\\\\"+\"new_desc_sele_data.csv\"\n site_name=pd.read_csv(data_file)[\"SITE_ID\"][i]\n return site_name\n\ndef _get_site_DD_dataset_csv(f,i):\n '''获取经过全部数据集(经过全部的特征选择)'''\n site_path=_get_site_folder(f,i)\n data_path=site_path+\"\\\\data_confirm.csv\"\n data=pd.read_csv(data_path)\n return data\n\n\ndef _get_site_IGBP(f,i):\n data_file = f +\"\\\\\"+\"new_desc_sele_data_origin.csv\"\n site_IGBP=pd.read_csv(data_file)[\"IGBP\"][i]\n return site_IGBP\n\ndef _get_site_feature_ale(f,i,feauture):\n site_path=_get_site_folder(f,i)\n prefix=\"ale_1_\"\n if type(feauture) is str:\n ale_path=site_path+\"\\\\\"+prefix+feauture+\".csv\"\n ale_data=pd.read_csv(ale_path)\n return ale_data\n\ndef _get_version_res_folder(f,version,site_name=None,i=None):\n import os\n version_folder=f+\"\\\\\"+version\n if i:\n site_name=_get_site_name(f,i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder=version_folder+\"\\\\\"+site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\ndef _get_site_folder(f,i=None,feature_name=None):\n data_file = f + \"\\\\\" + \"new_desc_sele_data_origin.csv\"\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path=data_content[\"SITE_PATH\"][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content[\"SITE_PATH\"][data_content[\"SITE_ID\"]==feature_name].values[0]\n return site_path\n else:\n print(\"lack of index or feature_name.\")\n\n\n\n\n\n\n\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
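A short illustration of how the path helpers in the record above compose. The root folder, row index, and version tag are invented for the example and are not part of the dataset row; each helper expects f to contain the new_desc_sele_data*.csv index files.

f = r'D:\flux_sites'                        # hypothetical results root
name = _get_site_name(f, 0)                 # SITE_ID of the first indexed site
data = _get_site_DD_dataset_csv(f, 0)       # loads <SITE_PATH>\data_confirm.csv for that site
res = _get_version_res_folder(f, 'v1', site_name=name)  # resolves f\v1\<site>, creating it if missing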
<|reserved_special_token_0|>
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Person(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Person(models.Model):
user = models.ForeignKey(User, related_name='person', on_delete=models.
CASCADE, blank=True, null=True)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(unique=True, validators=[validate_email])
university = models.ForeignKey(University, on_delete=models.PROTECT)
rut = models.CharField(max_length=13, unique=True)
phone_number = models.CharField(max_length=20)
emergency_phone_number = models.CharField(max_length=20, null=True)
avatar = models.ImageField(upload_to='person_avatars/', blank=True)
pending_messages = models.IntegerField(default=0)
def __str__(self):
return '{} {}'.format(self.name, self.last_name)
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import User
from Event.models import Event
from University.models import University
from django.core.validators import validate_email
class Person(models.Model):
user = models.ForeignKey(User, related_name='person', on_delete=models.
CASCADE, blank=True, null=True)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(unique=True, validators=[validate_email])
university = models.ForeignKey(University, on_delete=models.PROTECT)
rut = models.CharField(max_length=13, unique=True)
phone_number = models.CharField(max_length=20)
emergency_phone_number = models.CharField(max_length=20, null=True)
avatar = models.ImageField(upload_to='person_avatars/', blank=True)
pending_messages = models.IntegerField(default=0)
def __str__(self):
return '{} {}'.format(self.name, self.last_name)
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
|
flexible
|
{
"blob_id": "28f4f14c3c29ee96c370ffe71c268549552b915e",
"index": 2419,
"step-1": "<mask token>\n\n\nclass PersonTemporaryCode(models.Model):\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n code = models.IntegerField()\n expiration_date = models.DateTimeField()\n\n def __str__(self):\n return f'{self.person} - {self.code} -- {self.expiration_date}'\n",
"step-2": "<mask token>\n\n\nclass Person(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PersonTemporaryCode(models.Model):\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n code = models.IntegerField()\n expiration_date = models.DateTimeField()\n\n def __str__(self):\n return f'{self.person} - {self.code} -- {self.expiration_date}'\n",
"step-3": "<mask token>\n\n\nclass Person(models.Model):\n user = models.ForeignKey(User, related_name='person', on_delete=models.\n CASCADE, blank=True, null=True)\n event = models.ForeignKey(Event, on_delete=models.CASCADE)\n name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n email = models.EmailField(unique=True, validators=[validate_email])\n university = models.ForeignKey(University, on_delete=models.PROTECT)\n rut = models.CharField(max_length=13, unique=True)\n phone_number = models.CharField(max_length=20)\n emergency_phone_number = models.CharField(max_length=20, null=True)\n avatar = models.ImageField(upload_to='person_avatars/', blank=True)\n pending_messages = models.IntegerField(default=0)\n\n def __str__(self):\n return '{} {}'.format(self.name, self.last_name)\n\n\nclass PersonTemporaryCode(models.Model):\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n code = models.IntegerField()\n expiration_date = models.DateTimeField()\n\n def __str__(self):\n return f'{self.person} - {self.code} -- {self.expiration_date}'\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom Event.models import Event\nfrom University.models import University\nfrom django.core.validators import validate_email\n\n\nclass Person(models.Model):\n user = models.ForeignKey(User, related_name='person', on_delete=models.\n CASCADE, blank=True, null=True)\n event = models.ForeignKey(Event, on_delete=models.CASCADE)\n name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n email = models.EmailField(unique=True, validators=[validate_email])\n university = models.ForeignKey(University, on_delete=models.PROTECT)\n rut = models.CharField(max_length=13, unique=True)\n phone_number = models.CharField(max_length=20)\n emergency_phone_number = models.CharField(max_length=20, null=True)\n avatar = models.ImageField(upload_to='person_avatars/', blank=True)\n pending_messages = models.IntegerField(default=0)\n\n def __str__(self):\n return '{} {}'.format(self.name, self.last_name)\n\n\nclass PersonTemporaryCode(models.Model):\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n code = models.IntegerField()\n expiration_date = models.DateTimeField()\n\n def __str__(self):\n return f'{self.person} - {self.code} -- {self.expiration_date}'\n",
"step-5": null,
"step-ids": [
3,
4,
6,
7
]
}
|
[
3,
4,
6,
7
] |
<|reserved_special_token_0|>
class Truss:
def __init__(self, node1, node2, size, result, ax):
self.node1 = node1
self.node2 = node2
self.rod = Rod.Rod(node1, node2, result)
self.size = size
self.result = result
self.ax = ax
self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2
.y) ** 2)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def SaveTrussFig(self):
plt.savefig('truss.png', dpi=600)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Truss:
def __init__(self, node1, node2, size, result, ax):
self.node1 = node1
self.node2 = node2
self.rod = Rod.Rod(node1, node2, result)
self.size = size
self.result = result
self.ax = ax
self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2
.y) ** 2)
def PlotCalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
self.rod.PlotResult()
<|reserved_special_token_0|>
def SaveTrussFig(self):
plt.savefig('truss.png', dpi=600)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Truss:
def __init__(self, node1, node2, size, result, ax):
self.node1 = node1
self.node2 = node2
self.rod = Rod.Rod(node1, node2, result)
self.size = size
self.result = result
self.ax = ax
self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2
.y) ** 2)
def PlotCalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
self.rod.PlotResult()
def PlotUncalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
def SaveTrussFig(self):
plt.savefig('truss.png', dpi=600)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import division
import Rod
import matplotlib.pyplot as plt
import math
class Truss:
def __init__(self, node1, node2, size, result, ax):
self.node1 = node1
self.node2 = node2
self.rod = Rod.Rod(node1, node2, result)
self.size = size
self.result = result
self.ax = ax
self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2
.y) ** 2)
def PlotCalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
self.rod.PlotResult()
def PlotUncalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
def SaveTrussFig(self):
plt.savefig('truss.png', dpi=600)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 18:18:36 2018
@author: Nicole
"""
from __future__ import division
import Rod
import matplotlib.pyplot as plt
import math
class Truss:
def __init__(self,node1,node2,size,result,ax):
self.node1=node1
self.node2=node2
self.rod=Rod.Rod(node1,node2,result)
self.size=size
self.result=result
self.ax=ax
self.length=math.sqrt((node1.x-node2.x)**2+(node1.y-node2.y)**2)
def PlotCalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
self.rod.PlotResult()
def PlotUncalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
def SaveTrussFig(self):
plt.savefig('truss.png',dpi=600)
plt.show()
'''
pud=UnitPostProcess(1.8,1.4,3.4,3.2,1,1,1,0,5,0,0,8,8.0,48.6667)
pud.setfig()
pud.plot()
pud=UnitPostProcess(3.4,3.2,7.4,3.2,0,0,1,1,0,0,0,0,8.0,23.3333)
pud.plot()
pud.savefig()
'''
|
flexible
|
{
"blob_id": "f01a1b6d0de4ba685c489af2742159447f943d2d",
"index": 5605,
"step-1": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n <mask token>\n <mask token>\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n <mask token>\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\nfrom __future__ import division\nimport Rod\nimport matplotlib.pyplot as plt\nimport math\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 24 18:18:36 2018\n\n@author: Nicole\n\"\"\"\n\n\nfrom __future__ import division\nimport Rod\nimport matplotlib.pyplot as plt\nimport math\n\nclass Truss:\n def __init__(self,node1,node2,size,result,ax):\n self.node1=node1\n self.node2=node2\n self.rod=Rod.Rod(node1,node2,result)\n self.size=size\n self.result=result\n self.ax=ax\n self.length=math.sqrt((node1.x-node2.x)**2+(node1.y-node2.y)**2)\n def PlotCalculatedTruss(self): \n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n def SaveTrussFig(self):\n plt.savefig('truss.png',dpi=600)\n plt.show()\n\n'''\npud=UnitPostProcess(1.8,1.4,3.4,3.2,1,1,1,0,5,0,0,8,8.0,48.6667)\npud.setfig()\npud.plot()\npud=UnitPostProcess(3.4,3.2,7.4,3.2,0,0,1,1,0,0,0,0,8.0,23.3333)\npud.plot()\npud.savefig()\n'''",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=
'rest_framework'))]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^admin/', admin.site.urls), url('^', include(
'books.urls')), url('^', include('borrowed_books.urls')), url('^',
include('reviews.urls')), url('^', include('api_root.urls')), url(
'^api-token-auth/', obtain_jwt_token), url('^', include(
'django.contrib.auth.urls')), url('^account/', include('rest_auth.urls'
)), url('^account/registration/', include('rest_auth.registration.urls'))]
urlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=
'rest_framework'))]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [url('^admin/', admin.site.urls), url('^', include(
'books.urls')), url('^', include('borrowed_books.urls')), url('^',
include('reviews.urls')), url('^', include('api_root.urls')), url(
'^api-token-auth/', obtain_jwt_token), url('^', include(
'django.contrib.auth.urls')), url('^account/', include('rest_auth.urls'
)), url('^account/registration/', include('rest_auth.registration.urls'))]
urlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=
'rest_framework'))]
<|reserved_special_token_1|>
"""lendbooks URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
url(r'^admin/', admin.site.urls), # Django Admin
url(r'^', include('books.urls')), # Books Management
url(r'^', include('borrowed_books.urls')), # Borrow Books
url(r'^', include('reviews.urls')), # Reviews
url(r'^', include('api_root.urls')),
url(r'^api-token-auth/', obtain_jwt_token), # JWT
url(r'^', include('django.contrib.auth.urls')), # Django's own Auth'
url(r'^account/', include('rest_auth.urls')), # Account Management
url(r'^account/registration/', include('rest_auth.registration.urls')), # Account Registration
]
urlpatterns += [
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
|
flexible
|
{
"blob_id": "9e950f6fe895cfd497e94139397e8a0f19725dc0",
"index": 1902,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework'))]\n",
"step-3": "<mask token>\nurlpatterns = [url('^admin/', admin.site.urls), url('^', include(\n 'books.urls')), url('^', include('borrowed_books.urls')), url('^',\n include('reviews.urls')), url('^', include('api_root.urls')), url(\n '^api-token-auth/', obtain_jwt_token), url('^', include(\n 'django.contrib.auth.urls')), url('^account/', include('rest_auth.urls'\n )), url('^account/registration/', include('rest_auth.registration.urls'))]\nurlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework'))]\n",
"step-4": "<mask token>\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework_jwt.views import obtain_jwt_token\nurlpatterns = [url('^admin/', admin.site.urls), url('^', include(\n 'books.urls')), url('^', include('borrowed_books.urls')), url('^',\n include('reviews.urls')), url('^', include('api_root.urls')), url(\n '^api-token-auth/', obtain_jwt_token), url('^', include(\n 'django.contrib.auth.urls')), url('^account/', include('rest_auth.urls'\n )), url('^account/registration/', include('rest_auth.registration.urls'))]\nurlpatterns += [url('^api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework'))]\n",
"step-5": "\"\"\"lendbooks URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework_jwt.views import obtain_jwt_token\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls), # Django Admin\n url(r'^', include('books.urls')), # Books Management\n url(r'^', include('borrowed_books.urls')), # Borrow Books\n url(r'^', include('reviews.urls')), # Reviews\n url(r'^', include('api_root.urls')), \n url(r'^api-token-auth/', obtain_jwt_token), # JWT\n url(r'^', include('django.contrib.auth.urls')), # Django's own Auth'\n url(r'^account/', include('rest_auth.urls')), # Account Management\n url(r'^account/registration/', include('rest_auth.registration.urls')), # Account Registration\n]\n\nurlpatterns += [\n url(r'^api-auth/', include('rest_framework.urls',\n namespace='rest_framework')),\n]\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
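For context on the JWT route wired up in the record above, a typical token exchange against rest_framework_jwt's obtain_jwt_token view looks like the following sketch; the host and credentials are placeholders, not values from the record.

import requests
resp = requests.post('http://localhost:8000/api-token-auth/',
                     json={'username': 'alice', 'password': 'secret'})
token = resp.json()['token']                 # obtain_jwt_token returns {'token': '<JWT>'} on success
headers = {'Authorization': 'JWT ' + token}  # default header prefix for djangorestframework-jwt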
#! /usr/bin/env python
def see_great_place_about_large_man(str_arg):
own_day_and_last_person(str_arg)
print('own_case')
def own_day_and_last_person(str_arg):
print(str_arg)
if __name__ == '__main__':
see_great_place_about_large_man('use_work_of_next_way')
|
normal
|
{
"blob_id": "515c14fcf2c3e9da31f6aba4b49296b18f04f262",
"index": 4786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef own_day_and_last_person(str_arg):\n print(str_arg)\n\n\n<mask token>\n",
"step-3": "def see_great_place_about_large_man(str_arg):\n own_day_and_last_person(str_arg)\n print('own_case')\n\n\ndef own_day_and_last_person(str_arg):\n print(str_arg)\n\n\n<mask token>\n",
"step-4": "def see_great_place_about_large_man(str_arg):\n own_day_and_last_person(str_arg)\n print('own_case')\n\n\ndef own_day_and_last_person(str_arg):\n print(str_arg)\n\n\nif __name__ == '__main__':\n see_great_place_about_large_man('use_work_of_next_way')\n",
"step-5": "\n#! /usr/bin/env python\n\ndef see_great_place_about_large_man(str_arg):\n own_day_and_last_person(str_arg)\n print('own_case')\n\ndef own_day_and_last_person(str_arg):\n print(str_arg)\n\nif __name__ == '__main__':\n see_great_place_about_large_man('use_work_of_next_way')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('content-type: text/html')
print()
<|reserved_special_token_0|>
if status == 0:
    print('{} OS is stopped successfully....'.format(osname))
else:
print('some error: {}'.format(info))
<|reserved_special_token_1|>
print('content-type: text/html')
print()
<|reserved_special_token_0|>
form = cgi.FieldStorage()
osname = form.getvalue('x')
command = 'sudo docker stop {}'.format(osname)
output = subprocess.getstatusoutput(command)
status = output[0]
info = output[1]
if status == 0:
    print('{} OS is stopped successfully....'.format(osname))
else:
print('some error: {}'.format(info))
<|reserved_special_token_1|>
print('content-type: text/html')
print()
import subprocess
import cgi
form = cgi.FieldStorage()
osname = form.getvalue('x')
command = 'sudo docker stop {}'.format(osname)
output = subprocess.getstatusoutput(command)
status = output[0]
info = output[1]
if status == 0:
    print('{} OS is stopped successfully....'.format(osname))
else:
print('some error: {}'.format(info))
<|reserved_special_token_1|>
#!/usr/bin/python3
print("content-type: text/html")
print()
import subprocess
import cgi
form=cgi.FieldStorage()
osname=form.getvalue("x")
command="sudo docker stop {}".format(osname)
output=subprocess.getstatusoutput(command)
status=output[0]
info=output[1]
if status==0:
print("{} OS is stopped succesfully....".format(osname))
else:
print("some error: {}".format(info))
|
flexible
|
{
"blob_id": "1d2dae7f1d937bdd9a6044b23f8f1897e61dac23",
"index": 6330,
"step-1": "<mask token>\n",
"step-2": "print('content-type: text/html')\nprint()\n<mask token>\nif status == 0:\n print('{} OS is stopped succesfully....'.format(osname))\nelse:\n print('some error: {}'.format(info))\n",
"step-3": "print('content-type: text/html')\nprint()\n<mask token>\nform = cgi.FieldStorage()\nosname = form.getvalue('x')\ncommand = 'sudo docker stop {}'.format(osname)\noutput = subprocess.getstatusoutput(command)\nstatus = output[0]\ninfo = output[1]\nif status == 0:\n print('{} OS is stopped succesfully....'.format(osname))\nelse:\n print('some error: {}'.format(info))\n",
"step-4": "print('content-type: text/html')\nprint()\nimport subprocess\nimport cgi\nform = cgi.FieldStorage()\nosname = form.getvalue('x')\ncommand = 'sudo docker stop {}'.format(osname)\noutput = subprocess.getstatusoutput(command)\nstatus = output[0]\ninfo = output[1]\nif status == 0:\n print('{} OS is stopped succesfully....'.format(osname))\nelse:\n print('some error: {}'.format(info))\n",
"step-5": "#!/usr/bin/python3\nprint(\"content-type: text/html\")\nprint()\nimport subprocess\nimport cgi\nform=cgi.FieldStorage()\nosname=form.getvalue(\"x\")\ncommand=\"sudo docker stop {}\".format(osname)\noutput=subprocess.getstatusoutput(command)\nstatus=output[0]\ninfo=output[1]\nif status==0:\n print(\"{} OS is stopped succesfully....\".format(osname))\nelse:\n print(\"some error: {}\".format(info))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
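The CGI handler above interpolates the form value straight into a shell command string. A minimal hardened sketch, assuming Python 3.7+ and the docker CLI on PATH, passes an argv list instead, so the container name is never parsed by a shell:

#!/usr/bin/python3
import cgi
import subprocess

print("content-type: text/html")
print()
form = cgi.FieldStorage()
osname = form.getvalue("x") or ""
# An argv list (no shell=True) keeps a crafted container name from being
# interpreted as extra shell syntax.
result = subprocess.run(["sudo", "docker", "stop", osname],
                        capture_output=True, text=True)
if result.returncode == 0:
    print("{} OS is stopped successfully....".format(osname))
else:
    print("some error: {}".format(result.stderr))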
from pylab import *
def f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)
n = 256
x = np.linspace(-3,3,n)
y = np.linspace(-3,3,n)
X,Y = np.meshgrid(x,y)
axes([0.025,0.025,0.95,0.95])
contourf(X, Y, f(X,Y), 8, alpha=.75, cmap=cm.hot)
C = contour(X, Y, f(X,Y), 8, colors='black', linewidths=.5)
clabel(C, inline=1, fontsize=10)
xticks([]), yticks([])
savefig('../figures/contour_ex.png',dpi=48)
show()
|
normal
|
{
"blob_id": "e9c439eafac8fd689980ffcb562f3b5ee903dd56",
"index": 2604,
"step-1": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\n<mask token>\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\n<mask token>\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n",
"step-3": "<mask token>\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\nn = 256\nx = np.linspace(-3, 3, n)\ny = np.linspace(-3, 3, n)\nX, Y = np.meshgrid(x, y)\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\nC = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5)\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n",
"step-4": "from pylab import *\n\n\ndef f(x, y):\n return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n\n\nn = 256\nx = np.linspace(-3, 3, n)\ny = np.linspace(-3, 3, n)\nX, Y = np.meshgrid(x, y)\naxes([0.025, 0.025, 0.95, 0.95])\ncontourf(X, Y, f(X, Y), 8, alpha=0.75, cmap=cm.hot)\nC = contour(X, Y, f(X, Y), 8, colors='black', linewidth=0.5)\nclabel(C, inline=1, fontsize=10)\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png', dpi=48)\nshow()\n",
"step-5": "from pylab import *\n\ndef f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)\n\nn = 256\nx = np.linspace(-3,3,n)\ny = np.linspace(-3,3,n)\nX,Y = np.meshgrid(x,y)\n\naxes([0.025,0.025,0.95,0.95])\n\ncontourf(X, Y, f(X,Y), 8, alpha=.75, cmap=cm.hot)\nC = contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)\nclabel(C, inline=1, fontsize=10)\n\nxticks([]), yticks([])\nsavefig('../figures/contour_ex.png',dpi=48)\nshow()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
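For reference, the surface contoured in the record above is the function (in LaTeX):

f(x, y) = \left(1 - \frac{x}{2} + x^{5} + y^{3}\right) e^{-x^{2} - y^{2}}

The Gaussian factor e^{-x^{2}-y^{2}} damps the polynomial away from the origin, which is why the filled contours fade to near zero at the plot edges.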
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def isValidSudoku(self, board: List[List[str]]) ->bool:
cells = {}
for i in range(9):
for j in range(9):
if board[i][j] != '.':
val = board[i][j]
for k in range(j - 1, -1, -1):
if val == board[i][k]:
return False
for k in range(j + 1, 9):
if val == board[i][k]:
return False
for k in range(i - 1, -1, -1):
if val == board[k][j]:
return False
for k in range(i + 1, 9):
if val == board[k][j]:
return False
idx = i // 3 * 3 + j // 3
if idx in cells:
if val in cells[idx]:
return False
else:
cells[idx].append(val)
else:
cells[idx] = [val]
return True
<|reserved_special_token_1|>
from typing import *
class Solution:
def isValidSudoku(self, board: List[List[str]]) ->bool:
cells = {}
for i in range(9):
for j in range(9):
if board[i][j] != '.':
val = board[i][j]
for k in range(j - 1, -1, -1):
if val == board[i][k]:
return False
for k in range(j + 1, 9):
if val == board[i][k]:
return False
for k in range(i - 1, -1, -1):
if val == board[k][j]:
return False
for k in range(i + 1, 9):
if val == board[k][j]:
return False
idx = i // 3 * 3 + j // 3
if idx in cells:
if val in cells[idx]:
return False
else:
cells[idx].append(val)
else:
cells[idx] = [val]
return True
<|reserved_special_token_1|>
from typing import *
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
cells = {}
for i in range(9):
for j in range(9):
if board[i][j] != ".":
val = board[i][j]
# is unique in row
for k in range(j-1, -1, -1):
if val == board[i][k]:
return False
for k in range(j+1, 9):
if val == board[i][k]:
return False
# is unique in col
for k in range(i-1, -1, -1):
if val == board[k][j]:
return False
for k in range(i+1, 9):
if val == board[k][j]:
return False
idx = i // 3 * 3 + j // 3
if idx in cells:
if val in cells[idx]:
return False
else:
cells[idx].append(val)
else:
cells[idx] = [val]
return True
|
flexible
|
{
"blob_id": "57c911c9a10f9d116f1b7099c5202377e16050f1",
"index": 7871,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isValidSudoku(self, board: List[List[str]]) ->bool:\n cells = {}\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.':\n val = board[i][j]\n for k in range(j - 1, -1, -1):\n if val == board[i][k]:\n return False\n for k in range(j + 1, 9):\n if val == board[i][k]:\n return False\n for k in range(i - 1, -1, -1):\n if val == board[k][j]:\n return False\n for k in range(i + 1, 9):\n if val == board[k][j]:\n return False\n idx = i // 3 * 3 + j // 3\n if idx in cells:\n if val in cells[idx]:\n return False\n else:\n cells[idx].append(val)\n else:\n cells[idx] = [val]\n return True\n",
"step-4": "from typing import *\n\n\nclass Solution:\n\n def isValidSudoku(self, board: List[List[str]]) ->bool:\n cells = {}\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.':\n val = board[i][j]\n for k in range(j - 1, -1, -1):\n if val == board[i][k]:\n return False\n for k in range(j + 1, 9):\n if val == board[i][k]:\n return False\n for k in range(i - 1, -1, -1):\n if val == board[k][j]:\n return False\n for k in range(i + 1, 9):\n if val == board[k][j]:\n return False\n idx = i // 3 * 3 + j // 3\n if idx in cells:\n if val in cells[idx]:\n return False\n else:\n cells[idx].append(val)\n else:\n cells[idx] = [val]\n return True\n",
"step-5": "from typing import *\n\n\nclass Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n cells = {}\n \n for i in range(9):\n for j in range(9):\n if board[i][j] != \".\":\n val = board[i][j]\n \n # is unique in row\n for k in range(j-1, -1, -1):\n if val == board[i][k]:\n return False\n \n for k in range(j+1, 9):\n if val == board[i][k]:\n return False\n \n # is unique in col\n for k in range(i-1, -1, -1):\n if val == board[k][j]:\n return False\n \n for k in range(i+1, 9):\n if val == board[k][j]:\n return False\n \n idx = i // 3 * 3 + j // 3\n \n if idx in cells:\n if val in cells[idx]:\n return False\n else:\n cells[idx].append(val)\n else:\n cells[idx] = [val]\n \n return True\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
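The solution above re-scans the full row and column for every filled cell. A common single-pass alternative (not the record's method, shown here as a hedged sketch) tracks the digits already seen per row, column, and 3x3 box with sets:

class SolutionSets:
    def isValidSudoku(self, board):
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        for i in range(9):
            for j in range(9):
                v = board[i][j]
                if v == '.':
                    continue
                # Same box index formula as the record: i // 3 * 3 + j // 3
                b = i // 3 * 3 + j // 3
                if v in rows[i] or v in cols[j] or v in boxes[b]:
                    return False
                rows[i].add(v)
                cols[j].add(v)
                boxes[b].add(v)
        return True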
<|reserved_special_token_0|>
class Solution:
def isMonotonic(self, A: List[int]) ->bool:
flag = 0
for i in range(1, len(A)):
diff = A[i] - A[i - 1]
if diff * flag < 0:
return False
if flag == 0:
flag = diff
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def isMonotonic(self, A: List[int]) ->bool:
flag = 0
for i in range(1, len(A)):
diff = A[i] - A[i - 1]
if diff * flag < 0:
return False
if flag == 0:
flag = diff
return True
<|reserved_special_token_0|>
print(sl.isMonotonic(inp))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def isMonotonic(self, A: List[int]) ->bool:
flag = 0
for i in range(1, len(A)):
diff = A[i] - A[i - 1]
if diff * flag < 0:
return False
if flag == 0:
flag = diff
return True
sl = Solution()
inp = [1, 2, 2, 2, 1]
print(sl.isMonotonic(inp))
<|reserved_special_token_1|>
from typing import *
class Solution:
def isMonotonic(self, A: List[int]) ->bool:
flag = 0
for i in range(1, len(A)):
diff = A[i] - A[i - 1]
if diff * flag < 0:
return False
if flag == 0:
flag = diff
return True
sl = Solution()
inp = [1, 2, 2, 2, 1]
print(sl.isMonotonic(inp))
|
flexible
|
{
"blob_id": "a55d1286485e66a64aa78259ad1b1922c5c4c831",
"index": 4385,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\n<mask token>\nprint(sl.isMonotonic(inp))\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\nsl = Solution()\ninp = [1, 2, 2, 2, 1]\nprint(sl.isMonotonic(inp))\n",
"step-4": "from typing import *\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\nsl = Solution()\ninp = [1, 2, 2, 2, 1]\nprint(sl.isMonotonic(inp))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
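The flag trick above stores the direction as the sign of the first nonzero difference; any later difference of the opposite sign makes `diff * flag` negative. An equivalent pairwise sketch of the same check:

def is_monotonic(a):
    # Monotonic iff entirely non-decreasing or entirely non-increasing.
    return (all(x <= y for x, y in zip(a, a[1:]))
            or all(x >= y for x, y in zip(a, a[1:])))

assert is_monotonic([1, 2, 2, 3])
assert not is_monotonic([1, 2, 2, 2, 1])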
<|reserved_special_token_0|>
class SetGetMixin:
def get(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.zk_client.exists(getattr(self, path_variable)):
return None
return func(self, *args, **kwargs)
return wrapper
return decorator
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SetGetMixin:
def get(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.zk_client.exists(getattr(self, path_variable)):
return None
return func(self, *args, **kwargs)
return wrapper
return decorator
def set(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.zk_client.ensure_path(getattr(self, path_variable))
return func(self, *args, **kwargs)
return wrapper
return decorator
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):
if not zk_client.exists(base_path):
zk_client.ensure_path(base_path)
new_node = zk_client.create(base_path + '/' + prefix, ''.encode('utf-8'
), sequence=True, ephemeral=is_ephemeral)
return new_node
class SetGetMixin:
def get(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.zk_client.exists(getattr(self, path_variable)):
return None
return func(self, *args, **kwargs)
return wrapper
return decorator
def set(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.zk_client.ensure_path(getattr(self, path_variable))
return func(self, *args, **kwargs)
return wrapper
return decorator
<|reserved_special_token_1|>
from functools import wraps
def create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):
if not zk_client.exists(base_path):
zk_client.ensure_path(base_path)
new_node = zk_client.create(base_path + '/' + prefix, ''.encode('utf-8'
), sequence=True, ephemeral=is_ephemeral)
return new_node
class SetGetMixin:
def get(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.zk_client.exists(getattr(self, path_variable)):
return None
return func(self, *args, **kwargs)
return wrapper
return decorator
def set(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.zk_client.ensure_path(getattr(self, path_variable))
return func(self, *args, **kwargs)
return wrapper
return decorator
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# @Time : 2019/9/17 17:48
# @Author : ZhangYang
# @Email : [email protected]
from functools import wraps
def create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):
if not zk_client.exists(base_path):
zk_client.ensure_path(base_path)
new_node = zk_client.create( base_path+'/'+prefix, ''.encode('utf-8'), sequence=True, ephemeral=is_ephemeral )
return new_node
class SetGetMixin():
def get(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.zk_client.exists(getattr(self, path_variable)):
return None
return func(self, *args, **kwargs)
return wrapper
return decorator
def set(path_variable):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.zk_client.ensure_path(getattr(self, path_variable))
return func(self, *args, **kwargs)
return wrapper
return decorator
|
flexible
|
{
"blob_id": "f9a0c3b643c2ee6bb6778477bf8fc21564812081",
"index": 3373,
"step-1": "<mask token>\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n\n def set(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n",
"step-3": "<mask token>\n\n\ndef create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):\n if not zk_client.exists(base_path):\n zk_client.ensure_path(base_path)\n new_node = zk_client.create(base_path + '/' + prefix, ''.encode('utf-8'\n ), sequence=True, ephemeral=is_ephemeral)\n return new_node\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n\n def set(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n",
"step-4": "from functools import wraps\n\n\ndef create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):\n if not zk_client.exists(base_path):\n zk_client.ensure_path(base_path)\n new_node = zk_client.create(base_path + '/' + prefix, ''.encode('utf-8'\n ), sequence=True, ephemeral=is_ephemeral)\n return new_node\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n\n def set(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Time : 2019/9/17 17:48\n# @Author : ZhangYang\n# @Email : [email protected]\nfrom functools import wraps\n\n\ndef create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):\n if not zk_client.exists(base_path):\n zk_client.ensure_path(base_path)\n\n new_node = zk_client.create( base_path+'/'+prefix, ''.encode('utf-8'), sequence=True, ephemeral=is_ephemeral )\n return new_node\n\nclass SetGetMixin():\n def get(path_variable):\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n\n return wrapper\n return decorator\n\n def set(path_variable):\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
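A hedged usage sketch for the mixin above: the consuming class is assumed to expose `self.zk_client` (for example, a started kazoo `KazooClient`), and the `job_path` attribute name is hypothetical. The `get` decorator skips the wrapped call when the node does not exist; `set` ensures the path exists before writing.

# Assumes the SetGetMixin from the record above is in scope.
class JobNode(SetGetMixin):
    def __init__(self, zk_client, job_path):
        self.zk_client = zk_client
        self.job_path = job_path  # hypothetical path attribute

    @SetGetMixin.get('job_path')
    def read_state(self):
        data, _stat = self.zk_client.get(self.job_path)
        return data.decode('utf-8')

    @SetGetMixin.set('job_path')
    def write_state(self, state):
        self.zk_client.set(self.job_path, state.encode('utf-8'))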
#!/usr/bin/env python
# Copyright (c) 2016, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import urllib2
import argparse
import time
import datetime
import email.utils
import binascii
import csv
import multiprocessing.pool
####################
# Global Variables #
####################
__version__ = "1.0"
__author__ = "Itzik Kotler"
__copyright__ = "Copyright 2016, SafeBreach"
#############
# Functions #
#############
def __wait_till_next_minute():
sleeptime = 60 - datetime.datetime.utcnow().second
time.sleep(sleeptime)
def __calc_delta(expires_field, date_field):
now_date = datetime.datetime(*email.utils.parsedate(date_field)[:6])
expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
return expires_date - now_date
def __str2bits(string):
bits = []
if string.startswith('0b'):
bits = list(string[2:])
else:
# Convert text to binary, use the str repr to convert to list, skip 2 bytes to jump over '0b' prefix
bits = list(bin(int(binascii.hexlify(string), 16)))[2:]
# We're using .pop() so it's reverse() the order of the list
bits.reverse()
return bits
def main(args):
parser = argparse.ArgumentParser(prog='cachetalk')
parser.add_argument('url', metavar='URL', type=str, help='dead drop URL')
parser.add_argument('poll_interval', metavar='SECONDS', nargs='?', type=int,
help='polling intervals (i.e. the delta)')
parser.add_argument('-s', '--always-sync', action='store_true', help='always start on the top of the minute')
parser.add_argument('-f', '--force-start', action='store_true', help='start immediately without synchronizing')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-q', '--quiet', action='store_true', help='less output')
parser.add_argument('-1', '--try-once', action='store_true', help='try to write once and stop')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-w', '--write', nargs=1, type=str, metavar='DATA', help='connect to URL and write DATA')
group.add_argument('-r', '--read', nargs=1, type=int, metavar='LEN', help='monitor URL and read LEN amount of bits')
group.add_argument('-t', '--test', action='store_true', help='print HTTP Server Expires and calculate the delta')
group.add_argument('-b', '--batch', nargs=2, type=str, metavar=('FILE.CSV', 'R|W'), help='In batch mode you can supply a file with a list of URLs, DELTAs, and 1/0\'s')
args = parser.parse_args(args=args[1:])
if not args.url.startswith('http'):
args.url = 'http://' + args.url
if args.verbose:
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)))
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(debuglevel=1)))
req_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
req = urllib2.Request(args.url, headers=req_headers)
if args.batch:
print "START BATCH MODE"
pool = multiprocessing.pool.ThreadPool(processes=8)
threads = []
batch_mode = args.batch[1].lower()
results = []
with open(args.batch[0], 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
batch_argv = [sys.argv[0], '-1', '-s']
if batch_mode == 'r':
batch_argv.append('-r 1')
else:
batch_argv.append('-w0b' + row[2])
batch_argv.append(row[0])
batch_argv.append(row[1])
print "Calling Thread w/ %s" % (batch_argv[1:])
threads.append(pool.apply_async(main,(batch_argv,)))
for result in threads:
results.append(result.get())
# That's what happened when you commit code the night before the talk ;-)
results = reduce(lambda x,y: x+y, map(lambda x: str(x), reduce(lambda x,y: x+y, results)))
print "END OF BATCH MODE\n\n"
print ">>> RESULT: %s <<<" % results
elif args.test:
# Test-mode
try:
http_response = urllib2.urlopen(req)
http_response.read()
print '\n' + args.url + ':'
print "=" * (len(args.url) + 1) + '\n'
print "Expires equal to: %s" % http_response.headers['Expires']
print "Date equal to: %s\n" % http_response.headers['Date']
# Every hit changes Expires? Can't use URL for cache talking ...
if http_response.headers['Expires'] == http_response.headers['Date']:
print "NOT GOOD!"
else:
print "MAYBE ... (DELTA equals %s)" % __calc_delta(http_response.headers['Expires'],
http_response.headers['Date'])
except TypeError:
# expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
# TypeError: 'NoneType' object has no attribute '__getitem__'
print "`Expires' Value is Number and not a Date! Can't calculate delta ...\n"
except KeyError:
# Maybe it's not Expires?
print "Can't find `Expires' Header in HTTP Response ...\n"
except urllib2.HTTPError as e:
# Connection error
print "ERROR: %s for %s" % (str(e), args.url)
else:
# Write/Read Mode
first_sync = args.force_start
bits = []
if not args.read:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- INPUT (%s) ---" % args.write[0]
print ''.join(bits)
print "--- INPUT = %d BITS --" % (len(bits))
initial_poll_interval = args.poll_interval
last_input_bit = -1
last_poll_interval = -1
after_fp = False
sliding_delta = 0
if args.read:
if args.poll_interval < 11:
sliding_delta = 1
else:
sliding_delta = 10
args.poll_interval = args.poll_interval + sliding_delta
while True:
if not first_sync or args.always_sync:
if not args.quiet:
print "[%s]: Synchronizing ..." % time.asctime()
__wait_till_next_minute()
first_sync = True
print "[%s]: Synchronized! Need to sleep another %d second(s) ..." % (time.asctime(), args.poll_interval)
time.sleep(args.poll_interval)
print "[%s]: Work time!" % time.asctime()
observed_delta = None
if args.read:
# Read, append bit to bits array depends on the HTTP response
input_bit = 0
http_response = urllib2.urlopen(req)
http_response.read()
# Negative delta? (Minus sliding_delta, as read interval is always + sliding_delta to give the writer a buffer)
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
if observed_delta.total_seconds() < args.poll_interval - sliding_delta:
input_bit = 1
print "(READING | R#: %d | E: %s | D: %s | D2: %s): BIT %d" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds(), input_bit)
if last_input_bit == 0 and input_bit == 1 and last_poll_interval == observed_delta.total_seconds():
args.poll_interval = observed_delta.total_seconds()
print "*** FALSE POSITIVE! (Ignored; Changed to 0)"
bits.append(0)
last_input_bit = 0
after_fp = True
else:
args.poll_interval = observed_delta.total_seconds() + (sliding_delta + 1)
if after_fp:
# After False-positive and bit 1? Writer back online!
if input_bit == 1:
after_fp = False
else:
# After False-positive and bit 0? It's still False-positive ... Go back to original cycle!
args.poll_interval = initial_poll_interval
bits.append(input_bit)
last_input_bit = input_bit
last_poll_interval = args.poll_interval - (sliding_delta + 1)
if len(bits) == args.read[0]:
break
else:
# Write, pop bit form the bits array
try:
output_bit = bits.pop()
if output_bit == '0':
print "(WRITING | R#: =OFFLINE= | E: =OFFLINE= | D: =OFFLINE=): BIT 0"
if len(bits) == 0:
break
continue
while True:
http_response = urllib2.urlopen(req)
http_response.read()
observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
print "(WRITING | R#: %d | E: %s | D: %s | D2: %s): BIT 1" % (
http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
observed_delta.total_seconds())
if observed_delta.total_seconds() != args.poll_interval and not args.try_once:
print "*** RETRY!"
retry_sleep = observed_delta.total_seconds()
if retry_sleep == 0:
retry_sleep = 1
time.sleep(retry_sleep)
continue
# Do-while Writer is not aligned w/ Expires
break
if len(bits) == 0:
break
except IndexError:
break
if not args.quiet:
print "!!! EOF !!!"
if not bits:
bits = __str2bits(args.write[0])
if not args.quiet:
print "--- OUTPUT ---"
print ''.join(map(str, bits))
print "--- OUTPUT = %d BITS --" % (len(bits))
print " "
n = int(''.join(map(str, bits)), 2)
try:
print binascii.unhexlify('%x' % n)
except TypeError:
# TypeError: Odd-length string if n = 0 or 1
if len(bits) == 1:
pass
else:
raise
return bits
###############
# Entry Point #
###############
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
normal
|
{
"blob_id": "278f0ece7cc2c7bb2ec1a3a2a7401bf3bc09611d",
"index": 2659,
"step-1": "#!/usr/bin/env python\n# Copyright (c) 2016, SafeBreach\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nimport urllib2\nimport argparse\nimport time\nimport datetime\nimport email.utils\nimport binascii\nimport csv\nimport multiprocessing.pool\n\n####################\n# Global Variables #\n####################\n\n__version__ = \"1.0\"\n__author__ = \"Itzik Kotler\"\n__copyright__ = \"Copyright 2016, SafeBreach\"\n\n#############\n# Functions #\n#############\n\ndef __wait_till_next_minute():\n sleeptime = 60 - datetime.datetime.utcnow().second\n time.sleep(sleeptime)\n\n\ndef __calc_delta(expires_field, date_field):\n now_date = datetime.datetime(*email.utils.parsedate(date_field)[:6])\n expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])\n return expires_date - now_date\n\n\ndef __str2bits(string):\n bits = []\n if string.startswith('0b'):\n bits = list(string[2:])\n else:\n # Convert text to binary, use the str repr to convert to list, skip 2 bytes to jump over '0b' prefix\n bits = list(bin(int(binascii.hexlify(string), 16)))[2:]\n # We're using .pop() so it's reverse() the order of the list\n bits.reverse()\n return bits\n\n\ndef main(args):\n parser = argparse.ArgumentParser(prog='cachetalk')\n parser.add_argument('url', metavar='URL', type=str, help='dead drop URL')\n parser.add_argument('poll_interval', metavar='SECONDS', nargs='?', type=int,\n help='polling intervals (i.e. 
the delta)')\n parser.add_argument('-s', '--always-sync', action='store_true', help='always start on the top of the minute')\n parser.add_argument('-f', '--force-start', action='store_true', help='start immediately without synchronizing')\n parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')\n parser.add_argument('-q', '--quiet', action='store_true', help='less output')\n parser.add_argument('-1', '--try-once', action='store_true', help='try to write once and stop')\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-w', '--write', nargs=1, type=str, metavar='DATA', help='connect to URL and write DATA')\n group.add_argument('-r', '--read', nargs=1, type=int, metavar='LEN', help='monitor URL and read LEN amount of bits')\n group.add_argument('-t', '--test', action='store_true', help='print HTTP Server Expires and calculate the delta')\n group.add_argument('-b', '--batch', nargs=2, type=str, metavar=('FILE.CSV', 'R|W'), help='In batch mode you can supply a file with a list of URLs, DELTAs, and 1/0\\'s')\n args = parser.parse_args(args=args[1:])\n\n if not args.url.startswith('http'):\n args.url = 'http://' + args.url\n\n if args.verbose:\n urllib2.install_opener(urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)))\n urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(debuglevel=1)))\n\n req_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}\n req = urllib2.Request(args.url, headers=req_headers)\n\n if args.batch:\n\n print \"START BATCH MODE\"\n\n pool = multiprocessing.pool.ThreadPool(processes=8)\n threads = []\n batch_mode = args.batch[1].lower()\n results = []\n with open(args.batch[0], 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n for row in csvreader:\n batch_argv = [sys.argv[0], '-1', '-s']\n if batch_mode == 'r':\n batch_argv.append('-r 1')\n else:\n batch_argv.append('-w0b' + row[2])\n batch_argv.append(row[0])\n batch_argv.append(row[1])\n print \"Calling Thread w/ %s\" % (batch_argv[1:])\n threads.append(pool.apply_async(main,(batch_argv,)))\n\n for result in threads:\n results.append(result.get())\n\n # That's what happened when you commit code the night before the talk ;-)\n results = reduce(lambda x,y: x+y, map(lambda x: str(x), reduce(lambda x,y: x+y, results)))\n\n print \"END OF BATCH MODE\\n\\n\"\n print \">>> RESULT: %s <<<\" % results\n\n elif args.test:\n # Test-mode\n try:\n http_response = urllib2.urlopen(req)\n http_response.read()\n print '\\n' + args.url + ':'\n print \"=\" * (len(args.url) + 1) + '\\n'\n print \"Expires equal to: %s\" % http_response.headers['Expires']\n print \"Date equal to: %s\\n\" % http_response.headers['Date']\n # Every hit changes Expires? Can't use URL for cache talking ...\n if http_response.headers['Expires'] == http_response.headers['Date']:\n print \"NOT GOOD!\"\n else:\n print \"MAYBE ... (DELTA equals %s)\" % __calc_delta(http_response.headers['Expires'],\n http_response.headers['Date'])\n except TypeError:\n # expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])\n # TypeError: 'NoneType' object has no attribute '__getitem__'\n print \"`Expires' Value is Number and not a Date! 
Can't calculate delta ...\\n\"\n except KeyError:\n # Maybe it's not Expires?\n print \"Can't find `Expires' Header in HTTP Response ...\\n\"\n except urllib2.HTTPError as e:\n # Connection error\n print \"ERROR: %s for %s\" % (str(e), args.url)\n else:\n # Write/Read Mode\n first_sync = args.force_start\n\n bits = []\n if not args.read:\n bits = __str2bits(args.write[0])\n if not args.quiet:\n print \"--- INPUT (%s) ---\" % args.write[0]\n print ''.join(bits)\n print \"--- INPUT = %d BITS --\" % (len(bits))\n\n initial_poll_interval = args.poll_interval\n last_input_bit = -1\n last_poll_interval = -1\n after_fp = False\n sliding_delta = 0\n\n if args.read:\n if args.poll_interval < 11:\n sliding_delta = 1\n else:\n sliding_delta = 10\n args.poll_interval = args.poll_interval + sliding_delta\n\n while True:\n if not first_sync or args.always_sync:\n if not args.quiet:\n print \"[%s]: Synchronizing ...\" % time.asctime()\n __wait_till_next_minute()\n first_sync = True\n\n print \"[%s]: Synchronized! Need to sleep another %d second(s) ...\" % (time.asctime(), args.poll_interval)\n time.sleep(args.poll_interval)\n print \"[%s]: Work time!\" % time.asctime()\n\n observed_delta = None\n\n if args.read:\n # Read, append bit to bits array depends on the HTTP response\n input_bit = 0\n http_response = urllib2.urlopen(req)\n http_response.read()\n # Negative delta? (Minus sliding_delta, as read interval is always + sliding_delta to give the writer a buffer)\n observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])\n if observed_delta.total_seconds() < args.poll_interval - sliding_delta:\n input_bit = 1\n print \"(READING | R#: %d | E: %s | D: %s | D2: %s): BIT %d\" % (\n http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],\n observed_delta.total_seconds(), input_bit)\n if last_input_bit == 0 and input_bit == 1 and last_poll_interval == observed_delta.total_seconds():\n args.poll_interval = observed_delta.total_seconds()\n print \"*** FALSE POSITIVE! (Ignored; Changed to 0)\"\n bits.append(0)\n last_input_bit = 0\n after_fp = True\n else:\n args.poll_interval = observed_delta.total_seconds() + (sliding_delta + 1)\n if after_fp:\n # After False-positive and bit 1? Writer back online!\n if input_bit == 1:\n after_fp = False\n else:\n # After False-positive and bit 0? It's still False-positive ... 
Go back to original cycle!\n args.poll_interval = initial_poll_interval\n bits.append(input_bit)\n last_input_bit = input_bit\n last_poll_interval = args.poll_interval - (sliding_delta + 1)\n if len(bits) == args.read[0]:\n break\n else:\n # Write, pop bit form the bits array\n try:\n output_bit = bits.pop()\n if output_bit == '0':\n print \"(WRITING | R#: =OFFLINE= | E: =OFFLINE= | D: =OFFLINE=): BIT 0\"\n if len(bits) == 0:\n break\n continue\n while True:\n http_response = urllib2.urlopen(req)\n http_response.read()\n observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])\n print \"(WRITING | R#: %d | E: %s | D: %s | D2: %s): BIT 1\" % (\n http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],\n observed_delta.total_seconds())\n if observed_delta.total_seconds() != args.poll_interval and not args.try_once:\n print \"*** RETRY!\"\n retry_sleep = observed_delta.total_seconds()\n if retry_sleep == 0:\n retry_sleep = 1\n time.sleep(retry_sleep)\n continue\n # Do-while Writer is not aligned w/ Expires\n break\n if len(bits) == 0:\n break\n except IndexError:\n break\n\n if not args.quiet:\n print \"!!! EOF !!!\"\n\n if not bits:\n bits = __str2bits(args.write[0])\n\n if not args.quiet:\n print \"--- OUTPUT ---\"\n print ''.join(map(str, bits))\n print \"--- OUTPUT = %d BITS --\" % (len(bits))\n print \" \"\n n = int(''.join(map(str, bits)), 2)\n try:\n print binascii.unhexlify('%x' % n)\n except TypeError:\n # TypeError: Odd-length string if n = 0 or 1\n if len(bits) == 1:\n pass\n else:\n raise\n\n return bits\n\n###############\n# Entry Point #\n###############\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
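The covert channel above reads bits from whether the cached `Expires` minus `Date` delta shrank between polls. The record is Python 2; a Python 3 sketch of the same delta computation, with an illustrative pair of RFC 2822 header values:

import email.utils

def calc_delta(expires_field, date_field):
    # Expires minus Date, both parsed as RFC 2822 dates; mirrors the
    # record's __calc_delta using parsedate_to_datetime (Python 3.3+).
    expires = email.utils.parsedate_to_datetime(expires_field)
    date = email.utils.parsedate_to_datetime(date_field)
    return expires - date

delta = calc_delta('Tue, 01 Mar 2016 12:01:00 GMT',
                   'Tue, 01 Mar 2016 12:00:00 GMT')
assert delta.total_seconds() == 60.0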
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 10:17:32 2018
@author: pearseb
"""
#%% imports
import os
import numpy as np
import netCDF4 as nc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cmocean.cm as cmo
import seaborn as sb
sb.set(style='ticks')
import mpl_toolkits.basemap as bm
import pandas as pd
# move to working directory
os.chdir("C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication")
#%% get data
data = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')
mk3lpi_no3 = data.variables['no3'][...]
mk3lpi_n15 = data.variables['no3_15'][...]
mk3lpi_no3 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_no3)
mk3lpi_n15 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_n15)
mk3lpi_d15n = (mk3lpi_n15/(mk3lpi_no3-mk3lpi_n15)-1)*1000
mk3lpi_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')
mk3lpidust_no3 = data.variables['no3'][...]
mk3lpidust_n15 = data.variables['no3_15'][...]
mk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_no3)
mk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_n15)
mk3lpidust_d15n = (mk3lpidust_n15/(mk3lpidust_no3-mk3lpidust_n15)-1)*1000
mk3lpidust_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')
mk3llgm_no3 = data.variables['no3'][...]
mk3llgm_n15 = data.variables['no3_15'][...]
mk3llgm_no3 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_no3)
mk3llgm_n15 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_n15)
mk3llgm_d15n = (mk3llgm_n15/(mk3llgm_no3-mk3llgm_n15)-1)*1000
mk3llgm_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')
mk3llgmdust_no3 = data.variables['no3'][...]
mk3llgmdust_n15 = data.variables['no3_15'][...]
mk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_no3)
mk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_n15)
mk3llgmdust_d15n = (mk3llgmdust_n15/(mk3llgmdust_no3-mk3llgmdust_n15)-1)*1000
mk3llgmdust_d15org = data.variables['sed_d15n'][...]
grid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')
dvts = grid.variables['dvts'][...]
dats = grid.variables['dats'][...]
lats = grid.variables['latts'][...]
lat_bnds = grid.variables['latts_bnds'][...]
lons = grid.variables['lonts'][...]
lon_bnds = grid.variables['lonts_bnds'][...]
lon_bnds[:,0] += 360.0
deps = grid.variables['zts'][...]
dep_bnds = grid.variables['zts_bnds'][...]
zts = dvts/dats
#%% apply depth correction to d15N of organic matter (see Robinson et al., 2012, Paleoceanography)
deps3d = np.cumsum(zts,axis=0)
# correction
mk3lpi_d15org_cor = mk3lpi_d15org + 0.9*(deps3d*1e-3)
mk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9*(deps3d*1e-3)
mk3llgm_d15org_cor = mk3llgm_d15org + 0.9*(deps3d*1e-3)
mk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9*(deps3d*1e-3)
# average over all depths
mk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)
mk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0, weights=zts)
mk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)
mk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0, weights=zts)
#%% collect prepared compilation of sedimentary d15N records
df = pd.read_csv('Supplementary Data 1.csv')
print(df)
records = df[~np.isnan(df['d15n_LateH'])]
bulk_records = records[records['type']=='bulk']
bound_records = records[records['type']!='bulk']
#%%
lat_labs = ['80$^{\circ}$S', '60$^{\circ}$S', '40$^{\circ}$S', '20$^{\circ}$S', '0$^{\circ}$', \
'20$^{\circ}$N', '40$^{\circ}$N', '60$^{\circ}$N', '80$^{\circ}$N']
lon_labs = ['0$^{\circ}$E', '50$^{\circ}$E', '100$^{\circ}$E', '150$^{\circ}$E', '200$^{\circ}$E', \
'250$^{\circ}$E', '300$^{\circ}$E', '350$^{\circ}$E']
domain = [-45,0,45,355]
domain_draw = [-40,0,40,360]
dlat=20
dlon=60
xx,yy = np.meshgrid(lons, lats)
#%%
levs = np.arange(-5,5.1,0.5)
conts = [-1,1]
fig = plt.figure(facecolor='w', figsize=(10,4))
plt.title('Glacial minus Late Holocene change in $\delta^{15}$N$_{org}$', family='sans-serif', fontsize=12)
proj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')
lonproj, latproj = proj(xx, yy)
bulk_x, bulk_y = proj(np.array(bulk_records['lon']),np.array(bulk_records['lat']))
bound_x, bound_y = proj(np.array(bound_records['lon']),np.array(bound_records['lat']))
proj.drawcoastlines(linewidth=0.5, color='k')
proj.fillcontinents(color='grey')
p3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, \
levels=levs, vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')
c3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths=0.5, linestyles='solid')
s31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM']-bound_records['d15n_LateH'], \
marker='*', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \
alpha=0.75, edgecolor='k', linewidths=1.0, zorder=3)
s32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM']-bulk_records['d15n_LateH'], \
marker='o', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \
alpha=0.75, edgecolor='k', linewidths=1.0, zorder=2)
proj.drawparallels(range(domain_draw[0],domain_draw[2]+1,dlat), labels=[True,False,False,False], color=(.3,.3,.3), linewidth=0, fontsize=12, family='sans-serif')
proj.drawmeridians(range(domain_draw[1],domain_draw[3]+1,dlon), labels=[True,False,False,True], color=(.3,.3,.3), linewidth=0, fontsize=12)
from matplotlib.lines import Line2D
elements = [Line2D([0], [0], marker='o', markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label='Bulk organic N'),\
Line2D([0], [0], marker='*', markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label='Bound organic N')]
plt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5,-0.25), ncol=2, frameon=False, scatterpoints=1)
plt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)
cbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])
cbar = plt.colorbar(p3, cax=cbax, orientation='vertical')
cbar.ax.set_ylabel(u'$\delta^{15}$N ‰ vs air', fontsize=12, family='sans-serif')
plt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)
fig.savefig('figures_for_publication/fig4.pdf',dpi=300,bbox_inches='tight')
fig.savefig('figures_for_publication/fig4.png',dpi=300,bbox_inches='tight')
|
normal
|
{
"blob_id": "635b02e03578d44f13530bd57ab1a99987d4909d",
"index": 5987,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsb.set(style='ticks')\n<mask token>\nos.chdir(\n 'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'\n )\n<mask token>\nlon_bnds[:, 0] += 360.0\n<mask token>\nprint(df)\n<mask token>\nplt.title('Glacial minus Late Holocene change in $\\\\delta^{15}$N$_{org}$',\n family='sans-serif', fontsize=12)\n<mask token>\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\n<mask token>\nproj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=\n [True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,\n fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=\n [True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12\n )\n<mask token>\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),\n ncol=2, frameon=False, scatterpoints=1)\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\n<mask token>\ncbar.ax.set_ylabel(u'$\\\\delta^{15}$N ‰ vs air', fontsize=12, family=\n 'sans-serif')\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\nfig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')\n",
"step-3": "<mask token>\nsb.set(style='ticks')\n<mask token>\nos.chdir(\n 'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'\n )\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')\nmk3lpi_no3 = data.variables['no3'][...]\nmk3lpi_n15 = data.variables['no3_15'][...]\nmk3lpi_no3 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_no3)\nmk3lpi_n15 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_n15)\nmk3lpi_d15n = (mk3lpi_n15 / (mk3lpi_no3 - mk3lpi_n15) - 1) * 1000\nmk3lpi_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')\nmk3lpidust_no3 = data.variables['no3'][...]\nmk3lpidust_n15 = data.variables['no3_15'][...]\nmk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_no3)\nmk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_n15)\nmk3lpidust_d15n = (mk3lpidust_n15 / (mk3lpidust_no3 - mk3lpidust_n15) - 1\n ) * 1000\nmk3lpidust_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')\nmk3llgm_no3 = data.variables['no3'][...]\nmk3llgm_n15 = data.variables['no3_15'][...]\nmk3llgm_no3 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_no3)\nmk3llgm_n15 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_n15)\nmk3llgm_d15n = (mk3llgm_n15 / (mk3llgm_no3 - mk3llgm_n15) - 1) * 1000\nmk3llgm_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')\nmk3llgmdust_no3 = data.variables['no3'][...]\nmk3llgmdust_n15 = data.variables['no3_15'][...]\nmk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_no3)\nmk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_n15)\nmk3llgmdust_d15n = (mk3llgmdust_n15 / (mk3llgmdust_no3 - mk3llgmdust_n15) - 1\n ) * 1000\nmk3llgmdust_d15org = data.variables['sed_d15n'][...]\ngrid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')\ndvts = grid.variables['dvts'][...]\ndats = grid.variables['dats'][...]\nlats = grid.variables['latts'][...]\nlat_bnds = grid.variables['latts_bnds'][...]\nlons = grid.variables['lonts'][...]\nlon_bnds = grid.variables['lonts_bnds'][...]\nlon_bnds[:, 0] += 360.0\ndeps = grid.variables['zts'][...]\ndep_bnds = grid.variables['zts_bnds'][...]\nzts = dvts / dats\ndeps3d = np.cumsum(zts, axis=0)\nmk3lpi_d15org_cor = mk3lpi_d15org + 0.9 * (deps3d * 0.001)\nmk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9 * (deps3d * 0.001)\nmk3llgm_d15org_cor = mk3llgm_d15org + 0.9 * (deps3d * 0.001)\nmk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9 * (deps3d * 0.001)\nmk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)\nmk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0,\n weights=zts)\nmk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)\nmk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0,\n weights=zts)\ndf = pd.read_csv('Supplementary Data 1.csv')\nprint(df)\nrecords = df[~np.isnan(df['d15n_LateH'])]\nbulk_records = records[records['type'] == 'bulk']\nbound_records = records[records['type'] != 'bulk']\nlat_labs = ['80$^{\\\\circ}$S', '60$^{\\\\circ}$S', '40$^{\\\\circ}$S',\n '20$^{\\\\circ}$S', '0$^{\\\\circ}$', '20$^{\\\\circ}$N', '40$^{\\\\circ}$N',\n '60$^{\\\\circ}$N', '80$^{\\\\circ}$N']\nlon_labs = ['0$^{\\\\circ}$E', '50$^{\\\\circ}$E', '100$^{\\\\circ}$E',\n '150$^{\\\\circ}$E', '200$^{\\\\circ}$E', '250$^{\\\\circ}$E',\n '300$^{\\\\circ}$E', '350$^{\\\\circ}$E']\ndomain = [-45, 0, 45, 355]\ndomain_draw = [-40, 
0, 40, 360]\ndlat = 20\ndlon = 60\nxx, yy = np.meshgrid(lons, lats)\nlevs = np.arange(-5, 5.1, 0.5)\nconts = [-1, 1]\nfig = plt.figure(facecolor='w', figsize=(10, 4))\nplt.title('Glacial minus Late Holocene change in $\\\\delta^{15}$N$_{org}$',\n family='sans-serif', fontsize=12)\nproj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[\n 1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')\nlonproj, latproj = proj(xx, yy)\nbulk_x, bulk_y = proj(np.array(bulk_records['lon']), np.array(bulk_records[\n 'lat']))\nbound_x, bound_y = proj(np.array(bound_records['lon']), np.array(\n bound_records['lat']))\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\np3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, levels=levs,\n vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')\nc3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths\n =0.5, linestyle='-')\ns31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM'] -\n bound_records['d15n_LateH'], marker='*', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=3)\ns32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM'] -\n bulk_records['d15n_LateH'], marker='o', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=2)\nproj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=\n [True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,\n fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=\n [True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12\n )\n<mask token>\nelements = [Line2D([0], [0], marker='o', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label=\n 'Bulk organic N'), Line2D([0], [0], marker='*', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label=\n 'Bound organic N')]\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),\n ncol=2, frameon=False, scatterpoints=1)\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\ncbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])\ncbar = plt.colorbar(p3, cax=cbax, orientation='vertical')\ncbar.ax.set_ylabel(u'$\\\\delta^{15}$N ‰ vs air', fontsize=12, family=\n 'sans-serif')\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\nfig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')\n",
"step-4": "<mask token>\nimport os\nimport numpy as np\nimport netCDF4 as nc\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cmocean.cm as cmo\nimport seaborn as sb\nsb.set(style='ticks')\nimport mpl_toolkits.basemap as bm\nimport pandas as pd\nos.chdir(\n 'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'\n )\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')\nmk3lpi_no3 = data.variables['no3'][...]\nmk3lpi_n15 = data.variables['no3_15'][...]\nmk3lpi_no3 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_no3)\nmk3lpi_n15 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_n15)\nmk3lpi_d15n = (mk3lpi_n15 / (mk3lpi_no3 - mk3lpi_n15) - 1) * 1000\nmk3lpi_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')\nmk3lpidust_no3 = data.variables['no3'][...]\nmk3lpidust_n15 = data.variables['no3_15'][...]\nmk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_no3)\nmk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_n15)\nmk3lpidust_d15n = (mk3lpidust_n15 / (mk3lpidust_no3 - mk3lpidust_n15) - 1\n ) * 1000\nmk3lpidust_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')\nmk3llgm_no3 = data.variables['no3'][...]\nmk3llgm_n15 = data.variables['no3_15'][...]\nmk3llgm_no3 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_no3)\nmk3llgm_n15 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_n15)\nmk3llgm_d15n = (mk3llgm_n15 / (mk3llgm_no3 - mk3llgm_n15) - 1) * 1000\nmk3llgm_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')\nmk3llgmdust_no3 = data.variables['no3'][...]\nmk3llgmdust_n15 = data.variables['no3_15'][...]\nmk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_no3)\nmk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_n15)\nmk3llgmdust_d15n = (mk3llgmdust_n15 / (mk3llgmdust_no3 - mk3llgmdust_n15) - 1\n ) * 1000\nmk3llgmdust_d15org = data.variables['sed_d15n'][...]\ngrid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')\ndvts = grid.variables['dvts'][...]\ndats = grid.variables['dats'][...]\nlats = grid.variables['latts'][...]\nlat_bnds = grid.variables['latts_bnds'][...]\nlons = grid.variables['lonts'][...]\nlon_bnds = grid.variables['lonts_bnds'][...]\nlon_bnds[:, 0] += 360.0\ndeps = grid.variables['zts'][...]\ndep_bnds = grid.variables['zts_bnds'][...]\nzts = dvts / dats\ndeps3d = np.cumsum(zts, axis=0)\nmk3lpi_d15org_cor = mk3lpi_d15org + 0.9 * (deps3d * 0.001)\nmk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9 * (deps3d * 0.001)\nmk3llgm_d15org_cor = mk3llgm_d15org + 0.9 * (deps3d * 0.001)\nmk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9 * (deps3d * 0.001)\nmk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)\nmk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0,\n weights=zts)\nmk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)\nmk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0,\n weights=zts)\ndf = pd.read_csv('Supplementary Data 1.csv')\nprint(df)\nrecords = df[~np.isnan(df['d15n_LateH'])]\nbulk_records = records[records['type'] == 'bulk']\nbound_records = records[records['type'] != 'bulk']\nlat_labs = ['80$^{\\\\circ}$S', '60$^{\\\\circ}$S', '40$^{\\\\circ}$S',\n '20$^{\\\\circ}$S', '0$^{\\\\circ}$', '20$^{\\\\circ}$N', '40$^{\\\\circ}$N',\n '60$^{\\\\circ}$N', '80$^{\\\\circ}$N']\nlon_labs = 
['0$^{\\\\circ}$E', '50$^{\\\\circ}$E', '100$^{\\\\circ}$E',\n '150$^{\\\\circ}$E', '200$^{\\\\circ}$E', '250$^{\\\\circ}$E',\n '300$^{\\\\circ}$E', '350$^{\\\\circ}$E']\ndomain = [-45, 0, 45, 355]\ndomain_draw = [-40, 0, 40, 360]\ndlat = 20\ndlon = 60\nxx, yy = np.meshgrid(lons, lats)\nlevs = np.arange(-5, 5.1, 0.5)\nconts = [-1, 1]\nfig = plt.figure(facecolor='w', figsize=(10, 4))\nplt.title('Glacial minus Late Holocene change in $\\\\delta^{15}$N$_{org}$',\n family='sans-serif', fontsize=12)\nproj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[\n 1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')\nlonproj, latproj = proj(xx, yy)\nbulk_x, bulk_y = proj(np.array(bulk_records['lon']), np.array(bulk_records[\n 'lat']))\nbound_x, bound_y = proj(np.array(bound_records['lon']), np.array(\n bound_records['lat']))\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\np3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, levels=levs,\n vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')\nc3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths\n =0.5, linestyle='-')\ns31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM'] -\n bound_records['d15n_LateH'], marker='*', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=3)\ns32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM'] -\n bulk_records['d15n_LateH'], marker='o', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=2)\nproj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=\n [True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,\n fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=\n [True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12\n )\nfrom matplotlib.lines import Line2D\nelements = [Line2D([0], [0], marker='o', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label=\n 'Bulk organic N'), Line2D([0], [0], marker='*', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label=\n 'Bound organic N')]\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),\n ncol=2, frameon=False, scatterpoints=1)\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\ncbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])\ncbar = plt.colorbar(p3, cax=cbax, orientation='vertical')\ncbar.ax.set_ylabel(u'$\\\\delta^{15}$N ‰ vs air', fontsize=12, family=\n 'sans-serif')\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\nfig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 16 10:17:32 2018\n\n@author: pearseb\n\"\"\"\n\n#%% imporst\n \nimport os\nimport numpy as np\nimport netCDF4 as nc\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cmocean.cm as cmo\nimport seaborn as sb\nsb.set(style='ticks')\nimport mpl_toolkits.basemap as bm\nimport pandas as pd \n\n\n# move to working directory\nos.chdir(\"C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication\")\n\n\n#%% get data\n\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')\nmk3lpi_no3 = data.variables['no3'][...]\nmk3lpi_n15 = data.variables['no3_15'][...]\nmk3lpi_no3 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_no3)\nmk3lpi_n15 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_n15)\nmk3lpi_d15n = (mk3lpi_n15/(mk3lpi_no3-mk3lpi_n15)-1)*1000\nmk3lpi_d15org = data.variables['sed_d15n'][...]\n\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')\nmk3lpidust_no3 = data.variables['no3'][...]\nmk3lpidust_n15 = data.variables['no3_15'][...]\nmk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_no3)\nmk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_n15)\nmk3lpidust_d15n = (mk3lpidust_n15/(mk3lpidust_no3-mk3lpidust_n15)-1)*1000\nmk3lpidust_d15org = data.variables['sed_d15n'][...]\n\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')\nmk3llgm_no3 = data.variables['no3'][...]\nmk3llgm_n15 = data.variables['no3_15'][...]\nmk3llgm_no3 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_no3)\nmk3llgm_n15 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_n15)\nmk3llgm_d15n = (mk3llgm_n15/(mk3llgm_no3-mk3llgm_n15)-1)*1000\nmk3llgm_d15org = data.variables['sed_d15n'][...]\n\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')\nmk3llgmdust_no3 = data.variables['no3'][...]\nmk3llgmdust_n15 = data.variables['no3_15'][...]\nmk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_no3)\nmk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_n15)\nmk3llgmdust_d15n = (mk3llgmdust_n15/(mk3llgmdust_no3-mk3llgmdust_n15)-1)*1000\nmk3llgmdust_d15org = data.variables['sed_d15n'][...]\n\ngrid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')\ndvts = grid.variables['dvts'][...]\ndats = grid.variables['dats'][...]\nlats = grid.variables['latts'][...]\nlat_bnds = grid.variables['latts_bnds'][...]\nlons = grid.variables['lonts'][...]\nlon_bnds = grid.variables['lonts_bnds'][...]\nlon_bnds[:,0] += 360.0\ndeps = grid.variables['zts'][...]\ndep_bnds = grid.variables['zts_bnds'][...]\nzts = dvts/dats\n\n\n\n#%% apply depth correction to d15N of organic matter (see Robinson et al., 2012, Paleoceanography)\n\ndeps3d = np.cumsum(zts,axis=0)\n\n# correction\nmk3lpi_d15org_cor = mk3lpi_d15org + 0.9*(deps3d*1e-3)\nmk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9*(deps3d*1e-3)\n\nmk3llgm_d15org_cor = mk3llgm_d15org + 0.9*(deps3d*1e-3)\nmk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9*(deps3d*1e-3)\n\n\n# average over all depths\nmk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)\nmk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0, weights=zts)\n\nmk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)\nmk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0, weights=zts)\n\n\n\n#%% collect prepared compilation of sedimentary d15N records\n\ndf = pd.read_csv('Supplementary Data 1.csv')\nprint(df)\n\nrecords = df[~np.isnan(df['d15n_LateH'])]\nbulk_records = 
records[records['type']=='bulk']\nbound_records = records[records['type']!='bulk']\n\n\n#%%\n\nlat_labs = ['80$^{\\circ}$S', '60$^{\\circ}$S', '40$^{\\circ}$S', '20$^{\\circ}$S', '0$^{\\circ}$', \\\n '20$^{\\circ}$N', '40$^{\\circ}$N', '60$^{\\circ}$N', '80$^{\\circ}$N']\nlon_labs = ['0$^{\\circ}$E', '50$^{\\circ}$E', '100$^{\\circ}$E', '150$^{\\circ}$E', '200$^{\\circ}$E', \\\n '250$^{\\circ}$E', '300$^{\\circ}$E', '350$^{\\circ}$E']\n\ndomain = [-45,0,45,355] \ndomain_draw = [-40,0,40,360]\ndlat=20\ndlon=60\n\nxx,yy = np.meshgrid(lons, lats)\n\n\n#%%\n\nlevs = np.arange(-5,5.1,0.5)\nconts = [-1,1]\n\nfig = plt.figure(facecolor='w', figsize=(10,4))\n\nplt.title('Glacial minus Late Holocene change in $\\delta^{15}$N$_{org}$', family='sans-serif', fontsize=12)\nproj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')\nlonproj, latproj = proj(xx, yy)\n\nbulk_x, bulk_y = proj(np.array(bulk_records['lon']),np.array(bulk_records['lat']))\nbound_x, bound_y = proj(np.array(bound_records['lon']),np.array(bound_records['lat']))\n\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\np3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, \\\n levels=levs, vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')\nc3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths=0.5, linestyle='-')\ns31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM']-bound_records['d15n_LateH'], \\\n marker='*', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \\\n alpha=0.75, edgecolor='k', linewidths=1.0, zorder=3)\ns32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM']-bulk_records['d15n_LateH'], \\\n marker='o', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \\\n alpha=0.75, edgecolor='k', linewidths=1.0, zorder=2)\n\nproj.drawparallels(range(domain_draw[0],domain_draw[2]+1,dlat), labels=[True,False,False,False], color=(.3,.3,.3), linewidth=0, fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1],domain_draw[3]+1,dlon), labels=[True,False,False,True], color=(.3,.3,.3), linewidth=0, fontsize=12)\n\n\nfrom matplotlib.lines import Line2D\nelements = [Line2D([0], [0], marker='o', markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label='Bulk organic N'),\\\n Line2D([0], [0], marker='*', markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label='Bound organic N')]\n\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5,-0.25), ncol=2, frameon=False, scatterpoints=1)\n\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\ncbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])\ncbar = plt.colorbar(p3, cax=cbax, orientation='vertical')\ncbar.ax.set_ylabel(u'$\\delta^{15}$N ‰ vs air', fontsize=12, family='sans-serif')\n\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\n\n\nfig.savefig('figures_for_publication/fig4.pdf',dpi=300,bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png',dpi=300,bbox_inches='tight')\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
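The two model scripts embedded in the record above share one piece of arithmetic: δ15N in delta notation from paired nitrate tracers, plus a +0.9‰ per km depth correction for the sedimentary values. A minimal standalone sketch of that calculation with made-up scalars (in the model these are 4-D masked arrays, and the 15N tracer is assumed to be carried so that the standard isotope ratio equals 1):

import numpy as np

# Illustrative values only; chosen so d15n comes out near 5 per mil.
no3 = np.array([30.0])   # total nitrate tracer
n15 = np.array([15.04])  # 15N-nitrate tracer (assumed normalized: standard ratio = 1)
d15n = (n15 / (no3 - n15) - 1.0) * 1000.0   # delta notation, ~5.3 per mil here

depth_m = np.array([2000.0])
d15n_cor = d15n + 0.9 * (depth_m * 1e-3)    # +0.9 per mil per km, as in the scripts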
<|reserved_special_token_0|>
def make_dir(directory):
import os
import errno
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
<|reserved_special_token_0|>
def save_images(images, file_pattern, start_idx=1):
for i, image in enumerate(images):
file_path = file_pattern % (start_idx + i)
make_dir(file_path)
image_funcs.save_image(image, file_path)
def main():
optical_flows = compute_optical_flow_tvl1('')
frame_file_pattern = '%05d.jpg'
folder = ''
file_pattern = os.path.join(folder, frame_file_pattern)
save_images(optical_flows, file_pattern)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '..')
<|reserved_special_token_0|>
def make_dir(directory):
import os
import errno
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
<|reserved_special_token_0|>
flags.DEFINE_string('expected_rgb_frame_suffix', '.jpg',
"Expected RGB frame files' suffix.")
def compute_optical_flow_tvl1(video_frames_folder):
"""Compute the TV-L1 optical flow."""
TVL1 = DualTVL1()
rgb_frame_files = os.listdir(video_frames_folder)
rgb_frame_files = [frame_file for frame_file in rgb_frame_files if
frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]
rgb_frame_files.sort()
num_frames = len(rgb_frame_files)
assert num_frames >= 2, 'Only find %d (<=2) RGB frames under %s.' % (
num_frames, video_frames_folder)
optical_flows = []
prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(
rgb_frame_files[0], to_float=False))
for i in range(1, num_frames):
cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(
            rgb_frame_files[i], to_float=False))
cur_flow = TVL1.calc(prev_frame, cur_frame, None)
assert cur_flow.dtype == np.float32
optical_flows.append(cur_flow)
prev_frame = cur_frame
return optical_flows
def save_images(images, file_pattern, start_idx=1):
for i, image in enumerate(images):
file_path = file_pattern % (start_idx + i)
make_dir(file_path)
image_funcs.save_image(image, file_path)
def main():
optical_flows = compute_optical_flow_tvl1('')
frame_file_pattern = '%05d.jpg'
folder = ''
file_pattern = os.path.join(folder, frame_file_pattern)
save_images(optical_flows, file_pattern)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '..')
<|reserved_special_token_0|>
def make_dir(directory):
import os
import errno
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
FLAGS = flags.FLAGS
flags.DEFINE_string('expected_rgb_frame_suffix', '.jpg',
"Expected RGB frame files' suffix.")
def compute_optical_flow_tvl1(video_frames_folder):
"""Compute the TV-L1 optical flow."""
TVL1 = DualTVL1()
rgb_frame_files = os.listdir(video_frames_folder)
rgb_frame_files = [frame_file for frame_file in rgb_frame_files if
frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]
rgb_frame_files.sort()
num_frames = len(rgb_frame_files)
assert num_frames >= 2, 'Only find %d (<=2) RGB frames under %s.' % (
num_frames, video_frames_folder)
optical_flows = []
prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(
rgb_frame_files[0], to_float=False))
for i in range(1, num_frames):
cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(
            rgb_frame_files[i], to_float=False))
cur_flow = TVL1.calc(prev_frame, cur_frame, None)
assert cur_flow.dtype == np.float32
optical_flows.append(cur_flow)
prev_frame = cur_frame
return optical_flows
def save_images(images, file_pattern, start_idx=1):
for i, image in enumerate(images):
file_path = file_pattern % (start_idx + i)
make_dir(file_path)
image_funcs.save_image(image, file_path)
def main():
optical_flows = compute_optical_flow_tvl1('')
frame_file_pattern = '%05d.jpg'
folder = ''
file_pattern = os.path.join(folder, frame_file_pattern)
save_images(optical_flows, file_pattern)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from cv2 import DualTVL1OpticalFlow_create as DualTVL1
from tensorflow.python.platform import flags
import os
import sys
sys.path.insert(0, '..')
from utils import image_funcs
import numpy as np
def make_dir(directory):
import os
import errno
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
FLAGS = flags.FLAGS
flags.DEFINE_string('expected_rgb_frame_suffix', '.jpg',
"Expected RGB frame files' suffix.")
def compute_optical_flow_tvl1(video_frames_folder):
"""Compute the TV-L1 optical flow."""
TVL1 = DualTVL1()
rgb_frame_files = os.listdir(video_frames_folder)
rgb_frame_files = [frame_file for frame_file in rgb_frame_files if
frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]
rgb_frame_files.sort()
num_frames = len(rgb_frame_files)
assert num_frames >= 2, 'Only find %d (<=2) RGB frames under %s.' % (
num_frames, video_frames_folder)
optical_flows = []
prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(
rgb_frame_files[0], to_float=False))
for i in range(1, num_frames):
cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(
            rgb_frame_files[i], to_float=False))
cur_flow = TVL1.calc(prev_frame, cur_frame, None)
assert cur_flow.dtype == np.float32
optical_flows.append(cur_flow)
prev_frame = cur_frame
return optical_flows
def save_images(images, file_pattern, start_idx=1):
for i, image in enumerate(images):
file_path = file_pattern % (start_idx + i)
make_dir(file_path)
image_funcs.save_image(image, file_path)
def main():
optical_flows = compute_optical_flow_tvl1('')
frame_file_pattern = '%05d.jpg'
folder = ''
file_pattern = os.path.join(folder, frame_file_pattern)
save_images(optical_flows, file_pattern)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from cv2 import DualTVL1OpticalFlow_create as DualTVL1
from tensorflow.python.platform import flags
import os
import sys
sys.path.insert(0, '..')
from utils import image_funcs
import numpy as np
def make_dir(directory):
import os
import errno
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
FLAGS = flags.FLAGS
flags.DEFINE_string('expected_rgb_frame_suffix', ".jpg", 'Expected RGB frame files\' suffix.')
def compute_optical_flow_tvl1(video_frames_folder):
"""Compute the TV-L1 optical flow."""
TVL1 = DualTVL1()
# Collect RGB frame paths.
rgb_frame_files = os.listdir(video_frames_folder)
rgb_frame_files = [frame_file for frame_file in rgb_frame_files
if frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]
rgb_frame_files.sort()
num_frames = len(rgb_frame_files)
assert num_frames >= 2, "Only find %d (<=2) RGB frames under %s." % (num_frames, video_frames_folder)
# Iteratively compute optical flows.
optical_flows = []
prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(rgb_frame_files[0], to_float=False))
for i in range(1, num_frames):
        cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(rgb_frame_files[i], to_float=False))
cur_flow = TVL1.calc(prev_frame, cur_frame, None)
assert (cur_flow.dtype == np.float32)
optical_flows.append(cur_flow)
prev_frame = cur_frame
return optical_flows
def save_images(images, file_pattern, start_idx=1):
for i, image in enumerate(images):
file_path = file_pattern % (start_idx+i)
make_dir(file_path)
image_funcs.save_image(image, file_path)
def main():
optical_flows = compute_optical_flow_tvl1("")
frame_file_pattern = "%05d.jpg"
folder = ""
file_pattern = os.path.join(folder, frame_file_pattern)
save_images(optical_flows, file_pattern)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "42a717591fb8fe480581d8996e9811d0292d0eb1",
"index": 7567,
"step-1": "<mask token>\n\n\ndef make_dir(directory):\n import os\n import errno\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\n<mask token>\n\n\ndef save_images(images, file_pattern, start_idx=1):\n for i, image in enumerate(images):\n file_path = file_pattern % (start_idx + i)\n make_dir(file_path)\n image_funcs.save_image(image, file_path)\n\n\ndef main():\n optical_flows = compute_optical_flow_tvl1('')\n frame_file_pattern = '%05d.jpg'\n folder = ''\n file_pattern = os.path.join(folder, frame_file_pattern)\n save_images(optical_flows, file_pattern)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, '..')\n<mask token>\n\n\ndef make_dir(directory):\n import os\n import errno\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\n<mask token>\nflags.DEFINE_string('expected_rgb_frame_suffix', '.jpg',\n \"Expected RGB frame files' suffix.\")\n\n\ndef compute_optical_flow_tvl1(video_frames_folder):\n \"\"\"Compute the TV-L1 optical flow.\"\"\"\n TVL1 = DualTVL1()\n rgb_frame_files = os.listdir(video_frames_folder)\n rgb_frame_files = [frame_file for frame_file in rgb_frame_files if\n frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]\n rgb_frame_files.sort()\n num_frames = len(rgb_frame_files)\n assert num_frames >= 2, 'Only find %d (<=2) RGB frames under %s.' % (\n num_frames, video_frames_folder)\n optical_flows = []\n prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(\n rgb_frame_files[0], to_float=False))\n for i in range(1, num_frames):\n cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(\n rgb_frame_files[1], to_float=False))\n cur_flow = TVL1.calc(prev_frame, cur_frame, None)\n assert cur_flow.dtype == np.float32\n optical_flows.append(cur_flow)\n prev_frame = cur_frame\n return optical_flows\n\n\ndef save_images(images, file_pattern, start_idx=1):\n for i, image in enumerate(images):\n file_path = file_pattern % (start_idx + i)\n make_dir(file_path)\n image_funcs.save_image(image, file_path)\n\n\ndef main():\n optical_flows = compute_optical_flow_tvl1('')\n frame_file_pattern = '%05d.jpg'\n folder = ''\n file_pattern = os.path.join(folder, frame_file_pattern)\n save_images(optical_flows, file_pattern)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nsys.path.insert(0, '..')\n<mask token>\n\n\ndef make_dir(directory):\n import os\n import errno\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('expected_rgb_frame_suffix', '.jpg',\n \"Expected RGB frame files' suffix.\")\n\n\ndef compute_optical_flow_tvl1(video_frames_folder):\n \"\"\"Compute the TV-L1 optical flow.\"\"\"\n TVL1 = DualTVL1()\n rgb_frame_files = os.listdir(video_frames_folder)\n rgb_frame_files = [frame_file for frame_file in rgb_frame_files if\n frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]\n rgb_frame_files.sort()\n num_frames = len(rgb_frame_files)\n assert num_frames >= 2, 'Only find %d (<=2) RGB frames under %s.' % (\n num_frames, video_frames_folder)\n optical_flows = []\n prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(\n rgb_frame_files[0], to_float=False))\n for i in range(1, num_frames):\n cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(\n rgb_frame_files[1], to_float=False))\n cur_flow = TVL1.calc(prev_frame, cur_frame, None)\n assert cur_flow.dtype == np.float32\n optical_flows.append(cur_flow)\n prev_frame = cur_frame\n return optical_flows\n\n\ndef save_images(images, file_pattern, start_idx=1):\n for i, image in enumerate(images):\n file_path = file_pattern % (start_idx + i)\n make_dir(file_path)\n image_funcs.save_image(image, file_path)\n\n\ndef main():\n optical_flows = compute_optical_flow_tvl1('')\n frame_file_pattern = '%05d.jpg'\n folder = ''\n file_pattern = os.path.join(folder, frame_file_pattern)\n save_images(optical_flows, file_pattern)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom cv2 import DualTVL1OpticalFlow_create as DualTVL1\nfrom tensorflow.python.platform import flags\nimport os\nimport sys\nsys.path.insert(0, '..')\nfrom utils import image_funcs\nimport numpy as np\n\n\ndef make_dir(directory):\n import os\n import errno\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('expected_rgb_frame_suffix', '.jpg',\n \"Expected RGB frame files' suffix.\")\n\n\ndef compute_optical_flow_tvl1(video_frames_folder):\n \"\"\"Compute the TV-L1 optical flow.\"\"\"\n TVL1 = DualTVL1()\n rgb_frame_files = os.listdir(video_frames_folder)\n rgb_frame_files = [frame_file for frame_file in rgb_frame_files if\n frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]\n rgb_frame_files.sort()\n num_frames = len(rgb_frame_files)\n assert num_frames >= 2, 'Only find %d (<=2) RGB frames under %s.' % (\n num_frames, video_frames_folder)\n optical_flows = []\n prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(\n rgb_frame_files[0], to_float=False))\n for i in range(1, num_frames):\n cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(\n rgb_frame_files[1], to_float=False))\n cur_flow = TVL1.calc(prev_frame, cur_frame, None)\n assert cur_flow.dtype == np.float32\n optical_flows.append(cur_flow)\n prev_frame = cur_frame\n return optical_flows\n\n\ndef save_images(images, file_pattern, start_idx=1):\n for i, image in enumerate(images):\n file_path = file_pattern % (start_idx + i)\n make_dir(file_path)\n image_funcs.save_image(image, file_path)\n\n\ndef main():\n optical_flows = compute_optical_flow_tvl1('')\n frame_file_pattern = '%05d.jpg'\n folder = ''\n file_pattern = os.path.join(folder, frame_file_pattern)\n save_images(optical_flows, file_pattern)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom cv2 import DualTVL1OpticalFlow_create as DualTVL1\nfrom tensorflow.python.platform import flags\nimport os\nimport sys\n\nsys.path.insert(0, '..')\nfrom utils import image_funcs\n\nimport numpy as np\n\n\ndef make_dir(directory):\n import os\n import errno\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('expected_rgb_frame_suffix', \".jpg\", 'Expected RGB frame files\\' suffix.')\n\n\ndef compute_optical_flow_tvl1(video_frames_folder):\n \"\"\"Compute the TV-L1 optical flow.\"\"\"\n TVL1 = DualTVL1()\n\n # Collect RGB frame paths.\n rgb_frame_files = os.listdir(video_frames_folder)\n rgb_frame_files = [frame_file for frame_file in rgb_frame_files\n if frame_file.endswith(FLAGS.expected_rgb_frame_suffix)]\n rgb_frame_files.sort()\n num_frames = len(rgb_frame_files)\n assert num_frames >= 2, \"Only find %d (<=2) RGB frames under %s.\" % (num_frames, video_frames_folder)\n\n # Iteratively compute optical flows.\n optical_flows = []\n prev_frame = image_funcs.rgb_to_gray(image_funcs.read_image(rgb_frame_files[0], to_float=False))\n for i in range(1, num_frames):\n cur_frame = image_funcs.rgb_to_gray(image_funcs.read_image(rgb_frame_files[1], to_float=False))\n cur_flow = TVL1.calc(prev_frame, cur_frame, None)\n assert (cur_flow.dtype == np.float32)\n optical_flows.append(cur_flow)\n prev_frame = cur_frame\n return optical_flows\n\n\ndef save_images(images, file_pattern, start_idx=1):\n for i, image in enumerate(images):\n file_path = file_pattern % (start_idx+i)\n make_dir(file_path)\n image_funcs.save_image(image, file_path)\n\n\ndef main():\n optical_flows = compute_optical_flow_tvl1(\"\")\n frame_file_pattern = \"%05d.jpg\"\n folder = \"\"\n file_pattern = os.path.join(folder, frame_file_pattern)\n save_images(optical_flows, file_pattern)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
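The record above fills in a TV-L1 optical-flow script step by step. For reference, a minimal two-frame sketch of the same OpenCV call, independent of the image_funcs helpers — the frame file names are placeholders, and on OpenCV 4.x the factory moved to cv2.optflow.DualTVL1OpticalFlow_create (provided by opencv-contrib-python):

import cv2
import numpy as np

# Placeholder paths; any two consecutive frames of a video work.
prev_gray = cv2.cvtColor(cv2.imread('00001.jpg'), cv2.COLOR_BGR2GRAY)
cur_gray = cv2.cvtColor(cv2.imread('00002.jpg'), cv2.COLOR_BGR2GRAY)

tvl1 = cv2.DualTVL1OpticalFlow_create()      # OpenCV 3.x name, as in the record
flow = tvl1.calc(prev_gray, cur_gray, None)  # dense flow, one (dx, dy) per pixel

assert flow.dtype == np.float32
assert flow.shape == prev_gray.shape + (2,)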
<|reserved_special_token_0|>
class MyDictTestCase(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])
expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict1, expectedDict1)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',
'd', 'e', 'f'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict2, expectedDict2)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',
'b', 'c'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,
(6): None, (7): None}
self.assertEqual(actualDict2, expectedDict2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])
expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict1, expectedDict1)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',
'd', 'e', 'f'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict2, expectedDict2)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',
'b', 'c'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,
(6): None, (7): None}
self.assertEqual(actualDict2, expectedDict2)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import ConvertListToDict as cldf
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])
expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict1, expectedDict1)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',
'd', 'e', 'f'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}
self.assertEqual(actualDict2, expectedDict2)
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',
'b', 'c'])
expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,
(6): None, (7): None}
self.assertEqual(actualDict2, expectedDict2)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import ConvertListToDict as cldf
class MyDictTestCase(unittest.TestCase):
def test_Dict(self):
# Testcase1 (len(keys) == len(values))
actualDict1 = cldf.ConvertListsToDict([1, 2, 3],['a','b','c'])
expectedDict1 = {1: 'a', 2: 'b', 3: 'c'}
self.assertEqual(actualDict1, expectedDict1)
# Testcase2 (len(keys) < len(values))
actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c','d','e','f'])
expectedDict2 = {1: 'a', 2: 'b', 3: 'c'}
self.assertEqual(actualDict2, expectedDict2)
# Testcase3 (len(keys) > len(values))
actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a', 'b', 'c'])
expectedDict2 = {1: 'a', 2: 'b', 3: 'c', 4: None, 5: None, 6: None, 7: None}
self.assertEqual(actualDict2, expectedDict2)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "3421c3b839721694945bdbb4f17183bceaed5296",
"index": 786,
"step-1": "<mask token>\n\n\nclass MyDictTestCase(unittest.TestCase):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyDictTestCase(unittest.TestCase):\n\n def test_Dict(self):\n actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])\n expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}\n self.assertEqual(actualDict1, expectedDict1)\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',\n 'd', 'e', 'f'])\n expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}\n self.assertEqual(actualDict2, expectedDict2)\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',\n 'b', 'c'])\n expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,\n (6): None, (7): None}\n self.assertEqual(actualDict2, expectedDict2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyDictTestCase(unittest.TestCase):\n\n def test_Dict(self):\n actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])\n expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}\n self.assertEqual(actualDict1, expectedDict1)\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',\n 'd', 'e', 'f'])\n expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}\n self.assertEqual(actualDict2, expectedDict2)\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',\n 'b', 'c'])\n expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,\n (6): None, (7): None}\n self.assertEqual(actualDict2, expectedDict2)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport ConvertListToDict as cldf\n\n\nclass MyDictTestCase(unittest.TestCase):\n\n def test_Dict(self):\n actualDict1 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c'])\n expectedDict1 = {(1): 'a', (2): 'b', (3): 'c'}\n self.assertEqual(actualDict1, expectedDict1)\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c',\n 'd', 'e', 'f'])\n expectedDict2 = {(1): 'a', (2): 'b', (3): 'c'}\n self.assertEqual(actualDict2, expectedDict2)\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a',\n 'b', 'c'])\n expectedDict2 = {(1): 'a', (2): 'b', (3): 'c', (4): None, (5): None,\n (6): None, (7): None}\n self.assertEqual(actualDict2, expectedDict2)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport ConvertListToDict as cldf\n\nclass MyDictTestCase(unittest.TestCase):\n def test_Dict(self):\n # Testcase1 (len(keys) == len(values))\n actualDict1 = cldf.ConvertListsToDict([1, 2, 3],['a','b','c'])\n expectedDict1 = {1: 'a', 2: 'b', 3: 'c'}\n self.assertEqual(actualDict1, expectedDict1)\n\n # Testcase2 (len(keys) < len(values))\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3], ['a', 'b', 'c','d','e','f'])\n expectedDict2 = {1: 'a', 2: 'b', 3: 'c'}\n self.assertEqual(actualDict2, expectedDict2)\n\n # Testcase3 (len(keys) > len(values))\n actualDict2 = cldf.ConvertListsToDict([1, 2, 3, 4, 5, 6, 7], ['a', 'b', 'c'])\n expectedDict2 = {1: 'a', 2: 'b', 3: 'c', 4: None, 5: None, 6: None, 7: None}\n self.assertEqual(actualDict2, expectedDict2)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
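The module under test, ConvertListToDict, is not included in the record, but the three assertions pin its behavior down completely: equal-length lists map pairwise, surplus values are dropped, and surplus keys map to None. A minimal sketch of an implementation consistent with those tests (the actual module may differ):

from itertools import zip_longest

def ConvertListsToDict(keys, values):
    # Drop values beyond len(keys), then let zip_longest pad
    # any remaining keys with None.
    return dict(zip_longest(keys, values[:len(keys)]))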
<|reserved_special_token_0|>
class MNIST3dModel(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MNIST3dModel(nn.Module):
<|reserved_special_token_0|>
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.relu(x)
x = self.batchnorm1(x)
x = self.pool(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.relu(x)
x = self.batchnorm2(x)
x = self.pool(x)
x = self.dropout1(x)
x = x.view(x.size()[0], -1)
x = self.linear1(x)
x = self.relu(x)
x = self.dropout2(x)
x = self.linear2(x)
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MNIST3dModel(nn.Module):
def __init__(self, input_c=3, num_filters=8, num_classes=10):
super().__init__()
self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=
num_filters, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=
num_filters * 2, kernel_size=3, stride=1, padding=1)
self.batchnorm1 = nn.BatchNorm3d(16)
self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=
num_filters * 4, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=
num_filters * 8, kernel_size=3, stride=1, padding=1)
self.batchnorm2 = nn.BatchNorm3d(64)
self.pool = nn.MaxPool3d(2)
self.dropout1 = nn.Dropout(0.25)
self.relu = nn.ReLU()
self.linear1 = nn.Linear(4096, 1024)
self.dropout2 = nn.Dropout(0.5)
self.linear2 = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.relu(x)
x = self.batchnorm1(x)
x = self.pool(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.relu(x)
x = self.batchnorm2(x)
x = self.pool(x)
x = self.dropout1(x)
x = x.view(x.size()[0], -1)
x = self.linear1(x)
x = self.relu(x)
x = self.dropout2(x)
x = self.linear2(x)
return x
<|reserved_special_token_1|>
from torch import nn
class MNIST3dModel(nn.Module):
def __init__(self, input_c=3, num_filters=8, num_classes=10):
super().__init__()
self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=
num_filters, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=
num_filters * 2, kernel_size=3, stride=1, padding=1)
self.batchnorm1 = nn.BatchNorm3d(16)
self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=
num_filters * 4, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=
num_filters * 8, kernel_size=3, stride=1, padding=1)
self.batchnorm2 = nn.BatchNorm3d(64)
self.pool = nn.MaxPool3d(2)
self.dropout1 = nn.Dropout(0.25)
self.relu = nn.ReLU()
self.linear1 = nn.Linear(4096, 1024)
self.dropout2 = nn.Dropout(0.5)
self.linear2 = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.relu(x)
x = self.batchnorm1(x)
x = self.pool(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.relu(x)
x = self.batchnorm2(x)
x = self.pool(x)
x = self.dropout1(x)
x = x.view(x.size()[0], -1)
x = self.linear1(x)
x = self.relu(x)
x = self.dropout2(x)
x = self.linear2(x)
return x
|
flexible
|
{
"blob_id": "f6838906c961a9ca7d91d2ab02fd2af72797b880",
"index": 4628,
"step-1": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n <mask token>\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-3": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n\n def __init__(self, input_c=3, num_filters=8, num_classes=10):\n super().__init__()\n self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=\n num_filters, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=\n num_filters * 2, kernel_size=3, stride=1, padding=1)\n self.batchnorm1 = nn.BatchNorm3d(16)\n self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=\n num_filters * 4, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=\n num_filters * 8, kernel_size=3, stride=1, padding=1)\n self.batchnorm2 = nn.BatchNorm3d(64)\n self.pool = nn.MaxPool3d(2)\n self.dropout1 = nn.Dropout(0.25)\n self.relu = nn.ReLU()\n self.linear1 = nn.Linear(4096, 1024)\n self.dropout2 = nn.Dropout(0.5)\n self.linear2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-4": "from torch import nn\n\n\nclass MNIST3dModel(nn.Module):\n\n def __init__(self, input_c=3, num_filters=8, num_classes=10):\n super().__init__()\n self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=\n num_filters, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=\n num_filters * 2, kernel_size=3, stride=1, padding=1)\n self.batchnorm1 = nn.BatchNorm3d(16)\n self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=\n num_filters * 4, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=\n num_filters * 8, kernel_size=3, stride=1, padding=1)\n self.batchnorm2 = nn.BatchNorm3d(64)\n self.pool = nn.MaxPool3d(2)\n self.dropout1 = nn.Dropout(0.25)\n self.relu = nn.ReLU()\n self.linear1 = nn.Linear(4096, 1024)\n self.dropout2 = nn.Dropout(0.5)\n self.linear2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
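Because the model above hard-codes BatchNorm3d(16), BatchNorm3d(64), and Linear(4096, 1024), it only fits together with the default num_filters=8 and a 16x16x16 input volume: two MaxPool3d(2) stages leave 64 channels x 4 x 4 x 4 = 4096 features at the flatten. A small shape-check sketch under those assumptions:

import torch

model = MNIST3dModel()             # defaults: input_c=3, num_filters=8, num_classes=10
x = torch.randn(2, 3, 16, 16, 16)  # (batch, channels, D, H, W); 16^3 input assumed
logits = model(x)
print(logits.shape)                # torch.Size([2, 10])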
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
r_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')
em_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')
aishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')
agni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')
df = pd.concat([aishah_data_df.mean(),em_data_df.mean(),r_data_df.mean(),agni_data_df.mean()],axis=1).T
# Setting the positions and width for the bars
pos = list(range(len(df['Mersenne Twister'])))
width = 0.2
# Plotting the bars
fig, ax = plt.subplots(figsize=(10,5))
# Create a bar with the Mersenne Twister data,
# in position pos,
plt.bar(pos,
        #using df['Mersenne Twister'] data,
        df['Mersenne Twister'],
        # of width
        width,
        # with alpha 0.5
        alpha=0.5,
        # with color
        color='#EE3224')
# Create a bar with the Xorshift 128+ data,
# in position pos + some width buffer,
plt.bar([p + width for p in pos],
        #using df['Xorshift 128+'] data,
        df['Xorshift 128+'],
        # of width
        width,
        # with alpha 0.5
        alpha=0.5,
        # with color
        color='#F78F1E')
# Create a bar with the SPCG64 data,
# in position pos + 2 width buffers,
plt.bar([p + width*2 for p in pos],
        #using df['SPCG64'] data,
        df['SPCG64'],
        # of width
        width,
        # with color
        color='#FFC222')
# Create a bar with the Xoroshiro 128+ data,
# in position pos + 3 width buffers,
plt.bar([p + width*3 for p in pos],
        #using df['Xoroshiro 128+'] data,
        df['Xoroshiro 128+'],
        # of width
        width,
        # with color
        color='#FF3300')
# Set the y axis label
ax.set_ylabel('Average MB/s',fontweight='bold')
# Set the chart's title
ax.set_title('Average MBs of Random Numbers Generated in a Second',fontweight='bold')
# Set the position of the x ticks
ax.set_xticks([p + 1.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(['MacBook 2017','MacBook 2015','MacBook 2011','Ubuntu 18.04'])
# Setting the x-axis and y-axis limits
plt.xlim(min(pos)-width, max(pos)+width*4)
plt.ylim([0, 10000] )
# Adding the legend and showing the plot
plt.legend(['Mersenne Twister','Xorshift 128+', 'SPCG64','Xoroshiro 128+'], loc='upper left')
plt.grid()
#plt.show()
plt.savefig('barchart_compare.png')
|
normal
|
{
"blob_id": "467b919f6953737eedd3f99596df244bd1177575",
"index": 5411,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n",
"step-3": "<mask token>\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\ndf = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),\n agni_data_df.mean()], axis=1).T\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\nfig, ax = plt.subplots(figsize=(10, 5))\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\ndf = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),\n agni_data_df.mean()], axis=1).T\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\nfig, ax = plt.subplots(figsize=(10, 5))\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\n\ndf = pd.concat([aishah_data_df.mean(),em_data_df.mean(),r_data_df.mean(),agni_data_df.mean()],axis=1).T\n\n\n# Setting the positions and width for the bars\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\n\n# Plotting the bars\nfig, ax = plt.subplots(figsize=(10,5))\n\n# Create a bar with pre_score data,\n# in position pos,\nplt.bar(pos,\n #using df['pre_score'] data,\n df['Mersenne Twister'],\n # of width\n width,\n # with alpha 0.5\n alpha=0.5,\n # with color\n color='#EE3224')\n # with label the first value in first_name\n #label=df['first_name'][0])\n\n# Create a bar with mid_score data,\n# in position pos + some width buffer,\nplt.bar([p + width for p in pos],\n #using df['mid_score'] data,\n df['Xorshift 128+'],\n # of width\n width,\n # with alpha 0.5\n alpha=0.5,\n # with color\n color='#F78F1E')\n # with label the second value in first_name\n #label=df['first_name'][1])\n\n# Create a bar with post_score data,\n# in position pos + some width buffer,\nplt.bar([p + width*2 for p in pos],\n #using df['post_score'] data,\n df['SPCG64'],\n # of width\n width,\n # with alpha 0.5\n #alpha=0.5,\n # with color\n color='#FFC222')\n # with label the third value in first_name\n #label=df['first_name'][2])\n \n# Create a bar with post_score data,\n# in position pos + some width buffer,\nplt.bar([p + width*3 for p in pos],\n #using df['post_score'] data,\n df['Xoroshiro 128+'],\n # of width\n width,\n # with alpha 0.5\n #alpha=0.5,\n # with color\n color='#FF3300')\n # with label the third value in first_name\n #label=df['first_name'][2])\n\n# Set the y axis label\nax.set_ylabel('Average MB/s',fontweight='bold')\n\n# Set the chart's title\nax.set_title('Average MBs of Random Numbers Generated in a Second',fontweight='bold')\n\n# Set the position of the x ticks\nax.set_xticks([p + 1.5 * width for p in pos])\n\n# Set the labels for the x ticks\nax.set_xticklabels(['MacBook 2017','MacBook 2015','MacBook 2011','Ubuntu 18.04'])\n\n# Setting the x-axis and y-axis limits\nplt.xlim(min(pos)-width, max(pos)+width*4)\nplt.ylim([0, 10000] )\n\n# Adding the legend and showing the plot\nplt.legend(['Mersenne Twister','Xorshift 128+', 'SPCG64','Xoroshiro 128+'], loc='upper left')\nplt.grid()\n#plt.show()\nplt.savefig('barchart_compare.png')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
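The plotting record above positions each series by hand with p + width * k offsets. Under the same df, pos, and width defined there, the four plt.bar calls collapse into one loop that produces the same grouped layout (the per-series alpha quirks of the original are dropped):

series = ['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+']
colors = ['#EE3224', '#F78F1E', '#FFC222', '#FF3300']
for k, (name, color) in enumerate(zip(series, colors)):
    # Shift series k right by k bar widths so the groups sit side by side.
    plt.bar([p + k * width for p in pos], df[name], width, color=color, label=name)
plt.legend(loc='upper left')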
<|reserved_special_token_0|>
def test_divide_in_zero_from_start():
expression = '56/0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
<|reserved_special_token_0|>
def test_mod_in_zero():
expression = '-3%0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_complex_number():
expression = '(-7)^0.5'
result = main_evaluate(expression)
assert result.error_type == COMPLEX_ERROR
<|reserved_special_token_0|>
def test_minus_start():
expression = '-2^3'
result = main_evaluate(expression)
assert result == -8
def test_minus_after_binary():
expression = '5*-2'
result = main_evaluate(expression)
assert result == -10
<|reserved_special_token_0|>
def test_huge_equation():
expression = '1+' * 10000 + '0'
result = main_evaluate(expression)
assert result == 10000
<|reserved_special_token_0|>
def test_minus_inf_number():
expression = (
'-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'
)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
<|reserved_special_token_0|>
def test_space_inside_number():
expression = '5*1^4+4 7+5'
result = main_evaluate(expression)
assert result.error_type == SPACE_IN_NUMBER
<|reserved_special_token_0|>
def test_closer_has_no_opener():
expr = '(4+5)+9)+25^4'
validate = math_validations(expr)
assert validate.error_type == BRACKETS_ERROR
def test_last_token_pre_unary():
expr = '4!+~'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_last_token_binary_operator():
expr = '4!+'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
<|reserved_special_token_0|>
def test_first_dot_validation():
expr = '.5+45*(65/7)'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
<|reserved_special_token_0|>
def test_pre_unary_in_a_row():
expr = '~~~2'
result = main_evaluate(expr)
assert result == -2
def test_pre_unary_with_minuses():
expr = '~-~--~-10'
result = main_evaluate(expr)
assert result == -10
def test_post_unary_in_a_row():
expr = '3!!+4'
result = main_evaluate(expr)
assert result == 724
def test_post_unary_on_brackets():
expr = '(1+5&8$3)!+4'
result = main_evaluate(expr)
assert result == 724
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_divide_in_zero_from_start():
expression = '56/0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
<|reserved_special_token_0|>
def test_mod_in_zero():
expression = '-3%0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_complex_number():
expression = '(-7)^0.5'
result = main_evaluate(expression)
assert result.error_type == COMPLEX_ERROR
<|reserved_special_token_0|>
def test_factorial_not_round():
expression = '2.404!+34'
result = main_evaluate(expression)
assert result.error_type == FACTORIAL_ERROR
<|reserved_special_token_0|>
def test_minus_start():
expression = '-2^3'
result = main_evaluate(expression)
assert result == -8
def test_minus_after_binary():
expression = '5*-2'
result = main_evaluate(expression)
assert result == -10
<|reserved_special_token_0|>
def test_huge_equation():
expression = '1+' * 10000 + '0'
result = main_evaluate(expression)
assert result == 10000
def test_max_size_expression():
expression = '5' * (MAX_EXPRESSION_SIZE + 1)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
<|reserved_special_token_0|>
def test_multiply_overflow():
expression = '170!*444444'
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_minus_inf_number():
expression = (
'-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'
)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_plus_inf_number():
expression = (
'676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'
)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_space_inside_number():
expression = '5*1^4+4 7+5'
result = main_evaluate(expression)
assert result.error_type == SPACE_IN_NUMBER
def test_illegal_char_validation():
expr = '454#f'
validate = math_validations(expr)
assert validate.error_type == ILLEGAL_CHAR
<|reserved_special_token_0|>
def test_closer_has_no_opener():
expr = '(4+5)+9)+25^4'
validate = math_validations(expr)
assert validate.error_type == BRACKETS_ERROR
def test_last_token_pre_unary():
expr = '4!+~'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_last_token_binary_operator():
expr = '4!+'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_double_dot_validation():
expr = '4!+7..7'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_first_dot_validation():
expr = '.5+45*(65/7)'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_dot_after_operator_validation():
expr = '45+.5'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_valid_dot():
expr = '45+0.5'
result = main_evaluate(expr)
assert result == 45.5
<|reserved_special_token_0|>
def test_tilda_before_minus():
expr = '~~-(70)'
result = main_evaluate(expr)
assert result == -70
def test_pre_unary_in_a_row():
expr = '~~~2'
result = main_evaluate(expr)
assert result == -2
def test_pre_unary_with_minuses():
expr = '~-~--~-10'
result = main_evaluate(expr)
assert result == -10
def test_post_unary_in_a_row():
expr = '3!!+4'
result = main_evaluate(expr)
assert result == 724
def test_post_unary_on_brackets():
expr = '(1+5&8$3)!+4'
result = main_evaluate(expr)
assert result == 724
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_divide_in_zero_from_start():
expression = '56/0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
<|reserved_special_token_0|>
def test_mod_in_zero():
expression = '-3%0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_complex_number():
expression = '(-7)^0.5'
result = main_evaluate(expression)
assert result.error_type == COMPLEX_ERROR
<|reserved_special_token_0|>
def test_factorial_not_round():
expression = '2.404!+34'
result = main_evaluate(expression)
assert result.error_type == FACTORIAL_ERROR
def test_factorial_huge_number():
expression = '600000!+4'
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_minus_start():
expression = '-2^3'
result = main_evaluate(expression)
assert result == -8
def test_minus_after_binary():
expression = '5*-2'
result = main_evaluate(expression)
assert result == -10
<|reserved_special_token_0|>
def test_huge_equation():
expression = '1+' * 10000 + '0'
result = main_evaluate(expression)
assert result == 10000
def test_max_size_expression():
expression = '5' * (MAX_EXPRESSION_SIZE + 1)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
<|reserved_special_token_0|>
def test_multiply_overflow():
expression = '170!*444444'
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_minus_inf_number():
expression = (
'-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'
)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_plus_inf_number():
expression = (
'676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'
)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_space_inside_number():
expression = '5*1^4+4 7+5'
result = main_evaluate(expression)
assert result.error_type == SPACE_IN_NUMBER
def test_illegal_char_validation():
expr = '454#f'
validate = math_validations(expr)
assert validate.error_type == ILLEGAL_CHAR
<|reserved_special_token_0|>
def test_closer_has_no_opener():
expr = '(4+5)+9)+25^4'
validate = math_validations(expr)
assert validate.error_type == BRACKETS_ERROR
def test_last_token_pre_unary():
expr = '4!+~'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_last_token_binary_operator():
expr = '4!+'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_double_dot_validation():
expr = '4!+7..7'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_first_dot_validation():
expr = '.5+45*(65/7)'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_dot_after_operator_validation():
expr = '45+.5'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_valid_dot():
expr = '45+0.5'
result = main_evaluate(expr)
assert result == 45.5
<|reserved_special_token_0|>
def test_tilda_before_minus():
expr = '~~-(70)'
result = main_evaluate(expr)
assert result == -70
def test_pre_unary_in_a_row():
expr = '~~~2'
result = main_evaluate(expr)
assert result == -2
def test_pre_unary_with_minuses():
expr = '~-~--~-10'
result = main_evaluate(expr)
assert result == -10
def test_post_unary_in_a_row():
expr = '3!!+4'
result = main_evaluate(expr)
assert result == 724
def test_post_unary_on_brackets():
expr = '(1+5&8$3)!+4'
result = main_evaluate(expr)
assert result == 724
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_divide_in_zero_from_start():
expression = '56/0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_divide_in_zero_while_solve():
expression = '56/(5-5)'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_mod_in_zero():
expression = '-3%0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_complex_number():
expression = '(-7)^0.5'
result = main_evaluate(expression)
assert result.error_type == COMPLEX_ERROR
def test_factorial_negative():
expression = '(-9)!'
result = main_evaluate(expression)
assert result.error_type == FACTORIAL_ERROR
def test_factorial_not_round():
expression = '2.404!+34'
result = main_evaluate(expression)
assert result.error_type == FACTORIAL_ERROR
def test_factorial_huge_number():
expression = '600000!+4'
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_minus_start():
expression = '-2^3'
result = main_evaluate(expression)
assert result == -8
def test_minus_after_binary():
expression = '5*-2'
result = main_evaluate(expression)
assert result == -10
def test_minuses_row():
expression = '---(4+2)+8----8'
result = main_evaluate(expression)
assert result == 10
def test_huge_equation():
expression = '1+' * 10000 + '0'
result = main_evaluate(expression)
assert result == 10000
def test_max_size_expression():
expression = '5' * (MAX_EXPRESSION_SIZE + 1)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_pow_overflow():
expression = '225^225.6'
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_multiply_overflow():
expression = '170!*444444'
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_minus_inf_number():
expression = (
'-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'
)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_plus_inf_number():
expression = (
'676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'
)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_space_inside_number():
expression = '5*1^4+4 7+5'
result = main_evaluate(expression)
assert result.error_type == SPACE_IN_NUMBER
def test_illegal_char_validation():
expr = '454#f'
validate = math_validations(expr)
assert validate.error_type == ILLEGAL_CHAR
def test_unnecessary_brackets_validation():
expr = '3^((4+4))'
validate = math_validations(expr)
assert validate.error_type == UNNECESSARY_PARENTHESES
<|reserved_special_token_0|>
def test_opener_has_no_closer():
expr = '((65+6)/6+(4+8/3'
validate = math_validations(expr)
assert validate.error_type == BRACKETS_ERROR
def test_closer_has_no_opener():
expr = '(4+5)+9)+25^4'
validate = math_validations(expr)
assert validate.error_type == BRACKETS_ERROR
def test_last_token_pre_unary():
expr = '4!+~'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_last_token_binary_operator():
expr = '4!+'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_double_dot_validation():
expr = '4!+7..7'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_first_dot_validation():
expr = '.5+45*(65/7)'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_dot_after_operator_validation():
expr = '45+.5'
validate = math_validations(expr)
assert validate.error_type == DOT_ERROR
def test_valid_dot():
expr = '45+0.5'
result = main_evaluate(expr)
assert result == 45.5
def test_no_fraction_after_dot():
expr = '8.*2'
result = main_evaluate(expr)
assert result == 16
def test_tilda_before_minus():
expr = '~~-(70)'
result = main_evaluate(expr)
assert result == -70
def test_pre_unary_in_a_row():
expr = '~~~2'
result = main_evaluate(expr)
assert result == -2
def test_pre_unary_with_minuses():
expr = '~-~--~-10'
result = main_evaluate(expr)
assert result == -10
def test_post_unary_in_a_row():
expr = '3!!+4'
result = main_evaluate(expr)
assert result == 724
def test_post_unary_on_brackets():
expr = '(1+5&8$3)!+4'
result = main_evaluate(expr)
assert result == 724
<|reserved_special_token_1|>
"""
    test_extra.py:
    In this file I wrote extra tests for my calculator program.
    Divided into some main parts:
    - Math error tests (divide by zero, factorial, complex numbers)
    - Tests with edge cases of minus (operator / sign)
    - Big-result tests: expressions whose result
    is inf or causes an overflow exception
    - Tests for spaces in expressions
    - Tests for the tokens_validation.py functions:
    valid brackets, unnecessary parentheses, last-token validation
    - Decimal point placement in expressions
    - Pre and post unary operations
"""
from calculator_main_omega import *
from errors import *
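# Note: every test drives the calculator through main_evaluate(), which
# returns either a numeric result or an error object exposing `error_type`.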
# Math Errors Tests
def test_divide_in_zero_from_start():
    # test divide by zero in a case that
    # we can detect before solving
expression = '56/0'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_divide_in_zero_while_solve():
    # test divide by zero in a case that
    # we can't detect before solving
expression = '56/(5-5)'
result = main_evaluate(expression)
assert result.error_type == DIVIDE_ZERO
def test_mod_in_zero():
expression = '-3%0'
result = main_evaluate(expression)
    # this must be an error because we mod by zero
assert result.error_type == DIVIDE_ZERO
def test_complex_number():
expression = '(-7)^0.5'
result = main_evaluate(expression)
    # check that we get COMPLEX_ERROR when the result is complex
assert result.error_type == COMPLEX_ERROR
def test_factorial_negative():
expression = '(-9)!'
result = main_evaluate(expression)
# the factorial operation on negative numbers is not legal
assert result.error_type == FACTORIAL_ERROR
def test_factorial_not_round():
expression = '2.404!+34'
result = main_evaluate(expression)
    # the factorial operation on fractional numbers is not legal
assert result.error_type == FACTORIAL_ERROR
def test_factorial_huge_number():
expression = '600000!+4'
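    # 600000! is far too large to compute or store, so the calculator is
    # expected to fail with a memory error rather than hang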
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
# Minus tests:
def test_minus_start():
    # build an expression that has '-' at the start
expression = '-2^3'
result = main_evaluate(expression)
assert result == -8
def test_minus_after_binary():
    # test an expression with a minus after a binary operator
expression = '5*-2'
result = main_evaluate(expression)
assert result == -10
def test_minuses_row():
    # test an expression with several minuses in a row
expression = '---(4+2)+8----8'
result = main_evaluate(expression)
assert result == 10
def test_huge_equation():
expression = ('1+' * 10000) + '0'
    # I build an expression with 10000 operators
    # and test its effect on my program
result = main_evaluate(expression)
assert result == 10000
def test_max_size_expression():
    # build an expression with a size bigger than the
    # MAX_EXPRESSION_SIZE
expression = '5' * (MAX_EXPRESSION_SIZE + 1)
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
# Big results tests:
def test_pow_overflow():
expression = '225^225.6'
result = main_evaluate(expression)
assert result.error_type == MEMORY_EXCEPTION
def test_multiply_overflow():
expression = '170!*444444'
result = main_evaluate(expression)
# the result of this expression is too big to store in float
assert result.error_type == MEMORY_EXCEPTION
def test_minus_inf_number():
expression = '-67675765675675675675897333333333' \
'09876767565656756745345543333335' \
'67567563453423423423436546333337' \
'47646767567576575675756756733335' \
'76578867864564534535423423413533' \
'32523523525235235235235352352433' \
'12412413523523535235241241241231' \
'24124421874126512561275126571323' \
'52352353523524124124121241244218' \
'52352353523524124124121241244218' \
'52352353523524124124121241244218' \
                 '52352353523524124124121241244218'
result = main_evaluate(expression)
    # Python stores it as float('inf');
    # I test here whether my program handles that
assert result.error_type == MEMORY_EXCEPTION
def test_plus_inf_number():
expression = '67675765675675675675897333333333' \
'09876767565656756745345543333335' \
'67567563453423423423436546333337' \
'47646767567576575675756756733335' \
'76578867864564534535423423413533' \
'32523523525235235235235352352433' \
'12412413523523535235241241241231' \
'24124421874126512561275126571323' \
'52352353523524124124121241244218' \
'52352353523524124124121241244218' \
'52352353523524124124121241244218' \
                 '52352353523524124124121241244218'
result = main_evaluate(expression)
    # Python stores it as float('inf');
    # I test here whether my program handles that
assert result.error_type == MEMORY_EXCEPTION
# Space Test:
def test_space_inside_number():
    # we have an illegal space inside the number '47';
    # in the real Python interpreter a space inside
    # a number is invalid syntax
expression = '5*1^4+4 7+5'
result = main_evaluate(expression)
assert result.error_type == SPACE_IN_NUMBER
# Expression validation tests:
# Tests for the validations that I run on an
# expression before building the token list
def test_illegal_char_validation():
expr = '454#f'
validate = math_validations(expr)
assert validate.error_type == ILLEGAL_CHAR
def test_unnecessary_brackets_validation():
expr = '3^((4+4))'
validate = math_validations(expr)
    # double brackets around a simple expression like 4+4
    # are not legal, so check that my calculator recognizes it before solving
assert validate.error_type == UNNECESSARY_PARENTHESES
def test_only_number_in_brackets():
expr = '(6)'
validate = math_validations(expr)
    # test a lone number wrapped in brackets
assert validate is True
def test_opener_has_no_closer():
expr = '((65+6)/6+(4+8/3'
validate = math_validations(expr)
    # error saying that one of the openers '(' has no matching ')'
assert validate.error_type == BRACKETS_ERROR
def test_closer_has_no_opener():
expr = '(4+5)+9)+25^4'
validate = math_validations(expr)
    # error saying that one of the closers ')' has no matching '('
assert validate.error_type == BRACKETS_ERROR
def test_last_token_pre_unary():
expr = '4!+~'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
def test_last_token_binary_operator():
expr = '4!+'
validate = math_validations(expr)
assert validate.error_type == LAST_TOKEN_ERROR
# Tests for the use of a decimal point in an expression
def test_double_dot_validation():
expr = '4!+7..7'
validate = math_validations(expr)
    # a dot can't come right after a dot, so the specific error
    # will be a 'dot after dot' error
assert validate.error_type == DOT_ERROR
def test_first_dot_validation():
expr = '.5+45*(65/7)'
validate = math_validations(expr)
    # a dot can't be the first char in an expression
assert validate.error_type == DOT_ERROR
def test_dot_after_operator_validation():
expr = '45+.5'
validate = math_validations(expr)
    # a dot can't come right after an operator
assert validate.error_type == DOT_ERROR
def test_valid_dot():
expr = '45+0.5'
result = main_evaluate(expr)
assert result == 45.5
def test_no_fraction_after_dot():
expr = '8.*2'
    # I decided to support expressions
    # like this, just as the real Python interpreter does
result = main_evaluate(expr)
assert result == 16
# Pre Unary Tests:
def test_tilda_before_minus():
expr = '~~-(70)'
result = main_evaluate(expr)
assert result == -70
def test_pre_unary_in_a_row():
expr = '~~~2'
result = main_evaluate(expr)
assert result == -2
def test_pre_unary_with_minuses():
expr = '~-~--~-10'
result = main_evaluate(expr)
assert result == -10
# Post Unary Tests:
def test_post_unary_in_a_row():
expr = '3!!+4'
result = main_evaluate(expr)
assert result == 724
def test_post_unary_on_brackets():
expr = '(1+5&8$3)!+4'
result = main_evaluate(expr)
assert result == 724
|
flexible
|
{
"blob_id": "f17d59ca9bfa82848ec6a599e98f759449ccdd14",
"index": 6376,
"step-1": "<mask token>\n\n\ndef test_divide_in_zero_from_start():\n expression = '56/0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\n<mask token>\n\n\ndef test_mod_in_zero():\n expression = '-3%0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_complex_number():\n expression = '(-7)^0.5'\n result = main_evaluate(expression)\n assert result.error_type == COMPLEX_ERROR\n\n\n<mask token>\n\n\ndef test_minus_start():\n expression = '-2^3'\n result = main_evaluate(expression)\n assert result == -8\n\n\ndef test_minus_after_binary():\n expression = '5*-2'\n result = main_evaluate(expression)\n assert result == -10\n\n\n<mask token>\n\n\ndef test_huge_equation():\n expression = '1+' * 10000 + '0'\n result = main_evaluate(expression)\n assert result == 10000\n\n\n<mask token>\n\n\ndef test_minus_inf_number():\n expression = (\n '-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'\n )\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\n<mask token>\n\n\ndef test_space_inside_number():\n expression = '5*1^4+4 7+5'\n result = main_evaluate(expression)\n assert result.error_type == SPACE_IN_NUMBER\n\n\n<mask token>\n\n\ndef test_closer_has_no_opener():\n expr = '(4+5)+9)+25^4'\n validate = math_validations(expr)\n assert validate.error_type == BRACKETS_ERROR\n\n\ndef test_last_token_pre_unary():\n expr = '4!+~'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_last_token_binary_operator():\n expr = '4!+'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\n<mask token>\n\n\ndef test_first_dot_validation():\n expr = '.5+45*(65/7)'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\n<mask token>\n\n\ndef test_pre_unary_in_a_row():\n expr = '~~~2'\n result = main_evaluate(expr)\n assert result == -2\n\n\ndef test_pre_unary_with_minuses():\n expr = '~-~--~-10'\n result = main_evaluate(expr)\n assert result == -10\n\n\ndef test_post_unary_in_a_row():\n expr = '3!!+4'\n result = main_evaluate(expr)\n assert result == 724\n\n\ndef test_post_unary_on_brackets():\n expr = '(1+5&8$3)!+4'\n result = main_evaluate(expr)\n assert result == 724\n",
"step-2": "<mask token>\n\n\ndef test_divide_in_zero_from_start():\n expression = '56/0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\n<mask token>\n\n\ndef test_mod_in_zero():\n expression = '-3%0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_complex_number():\n expression = '(-7)^0.5'\n result = main_evaluate(expression)\n assert result.error_type == COMPLEX_ERROR\n\n\n<mask token>\n\n\ndef test_factorial_not_round():\n expression = '2.404!+34'\n result = main_evaluate(expression)\n assert result.error_type == FACTORIAL_ERROR\n\n\n<mask token>\n\n\ndef test_minus_start():\n expression = '-2^3'\n result = main_evaluate(expression)\n assert result == -8\n\n\ndef test_minus_after_binary():\n expression = '5*-2'\n result = main_evaluate(expression)\n assert result == -10\n\n\n<mask token>\n\n\ndef test_huge_equation():\n expression = '1+' * 10000 + '0'\n result = main_evaluate(expression)\n assert result == 10000\n\n\ndef test_max_size_expression():\n expression = '5' * (MAX_EXPRESSION_SIZE + 1)\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\n<mask token>\n\n\ndef test_multiply_overflow():\n expression = '170!*444444'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_minus_inf_number():\n expression = (\n '-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'\n )\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_plus_inf_number():\n expression = (\n '676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'\n )\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_space_inside_number():\n expression = '5*1^4+4 7+5'\n result = main_evaluate(expression)\n assert result.error_type == SPACE_IN_NUMBER\n\n\ndef test_illegal_char_validation():\n expr = '454#f'\n validate = math_validations(expr)\n assert validate.error_type == ILLEGAL_CHAR\n\n\n<mask token>\n\n\ndef test_closer_has_no_opener():\n expr = '(4+5)+9)+25^4'\n validate = math_validations(expr)\n assert validate.error_type == BRACKETS_ERROR\n\n\ndef test_last_token_pre_unary():\n expr = '4!+~'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_last_token_binary_operator():\n expr = '4!+'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_double_dot_validation():\n expr = '4!+7..7'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef test_first_dot_validation():\n expr = '.5+45*(65/7)'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef test_dot_after_operator_validation():\n expr = '45+.5'\n validate = math_validations(expr)\n assert validate.error_type == 
DOT_ERROR\n\n\ndef test_valid_dot():\n expr = '45+0.5'\n result = main_evaluate(expr)\n assert result == 45.5\n\n\n<mask token>\n\n\ndef test_tilda_before_minus():\n expr = '~~-(70)'\n result = main_evaluate(expr)\n assert result == -70\n\n\ndef test_pre_unary_in_a_row():\n expr = '~~~2'\n result = main_evaluate(expr)\n assert result == -2\n\n\ndef test_pre_unary_with_minuses():\n expr = '~-~--~-10'\n result = main_evaluate(expr)\n assert result == -10\n\n\ndef test_post_unary_in_a_row():\n expr = '3!!+4'\n result = main_evaluate(expr)\n assert result == 724\n\n\ndef test_post_unary_on_brackets():\n expr = '(1+5&8$3)!+4'\n result = main_evaluate(expr)\n assert result == 724\n",
"step-3": "<mask token>\n\n\ndef test_divide_in_zero_from_start():\n expression = '56/0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\n<mask token>\n\n\ndef test_mod_in_zero():\n expression = '-3%0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_complex_number():\n expression = '(-7)^0.5'\n result = main_evaluate(expression)\n assert result.error_type == COMPLEX_ERROR\n\n\n<mask token>\n\n\ndef test_factorial_not_round():\n expression = '2.404!+34'\n result = main_evaluate(expression)\n assert result.error_type == FACTORIAL_ERROR\n\n\ndef test_factorial_huge_number():\n expression = '600000!+4'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_minus_start():\n expression = '-2^3'\n result = main_evaluate(expression)\n assert result == -8\n\n\ndef test_minus_after_binary():\n expression = '5*-2'\n result = main_evaluate(expression)\n assert result == -10\n\n\n<mask token>\n\n\ndef test_huge_equation():\n expression = '1+' * 10000 + '0'\n result = main_evaluate(expression)\n assert result == 10000\n\n\ndef test_max_size_expression():\n expression = '5' * (MAX_EXPRESSION_SIZE + 1)\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\n<mask token>\n\n\ndef test_multiply_overflow():\n expression = '170!*444444'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_minus_inf_number():\n expression = (\n '-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'\n )\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_plus_inf_number():\n expression = (\n '676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'\n )\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_space_inside_number():\n expression = '5*1^4+4 7+5'\n result = main_evaluate(expression)\n assert result.error_type == SPACE_IN_NUMBER\n\n\ndef test_illegal_char_validation():\n expr = '454#f'\n validate = math_validations(expr)\n assert validate.error_type == ILLEGAL_CHAR\n\n\n<mask token>\n\n\ndef test_closer_has_no_opener():\n expr = '(4+5)+9)+25^4'\n validate = math_validations(expr)\n assert validate.error_type == BRACKETS_ERROR\n\n\ndef test_last_token_pre_unary():\n expr = '4!+~'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_last_token_binary_operator():\n expr = '4!+'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_double_dot_validation():\n expr = '4!+7..7'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef test_first_dot_validation():\n expr = '.5+45*(65/7)'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef 
test_dot_after_operator_validation():\n expr = '45+.5'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef test_valid_dot():\n expr = '45+0.5'\n result = main_evaluate(expr)\n assert result == 45.5\n\n\n<mask token>\n\n\ndef test_tilda_before_minus():\n expr = '~~-(70)'\n result = main_evaluate(expr)\n assert result == -70\n\n\ndef test_pre_unary_in_a_row():\n expr = '~~~2'\n result = main_evaluate(expr)\n assert result == -2\n\n\ndef test_pre_unary_with_minuses():\n expr = '~-~--~-10'\n result = main_evaluate(expr)\n assert result == -10\n\n\ndef test_post_unary_in_a_row():\n expr = '3!!+4'\n result = main_evaluate(expr)\n assert result == 724\n\n\ndef test_post_unary_on_brackets():\n expr = '(1+5&8$3)!+4'\n result = main_evaluate(expr)\n assert result == 724\n",
"step-4": "<mask token>\n\n\ndef test_divide_in_zero_from_start():\n expression = '56/0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_divide_in_zero_while_solve():\n expression = '56/(5-5)'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_mod_in_zero():\n expression = '-3%0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_complex_number():\n expression = '(-7)^0.5'\n result = main_evaluate(expression)\n assert result.error_type == COMPLEX_ERROR\n\n\ndef test_factorial_negative():\n expression = '(-9)!'\n result = main_evaluate(expression)\n assert result.error_type == FACTORIAL_ERROR\n\n\ndef test_factorial_not_round():\n expression = '2.404!+34'\n result = main_evaluate(expression)\n assert result.error_type == FACTORIAL_ERROR\n\n\ndef test_factorial_huge_number():\n expression = '600000!+4'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_minus_start():\n expression = '-2^3'\n result = main_evaluate(expression)\n assert result == -8\n\n\ndef test_minus_after_binary():\n expression = '5*-2'\n result = main_evaluate(expression)\n assert result == -10\n\n\ndef test_minuses_row():\n expression = '---(4+2)+8----8'\n result = main_evaluate(expression)\n assert result == 10\n\n\ndef test_huge_equation():\n expression = '1+' * 10000 + '0'\n result = main_evaluate(expression)\n assert result == 10000\n\n\ndef test_max_size_expression():\n expression = '5' * (MAX_EXPRESSION_SIZE + 1)\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_pow_overflow():\n expression = '225^225.6'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_multiply_overflow():\n expression = '170!*444444'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_minus_inf_number():\n expression = (\n '-676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'\n )\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_plus_inf_number():\n expression = (\n '676757656756756756758973333333330987676756565675674534554333333567567563453423423423436546333337476467675675765756757567567333357657886786456453453542342341353332523523525235235235235352352433124124135235235352352412412412312412442187412651256127512657132352352353523524124124121241244218523523535235241241241212412442185235235352352412412412124124421852352353523524124124121241244218'\n )\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_space_inside_number():\n expression = '5*1^4+4 7+5'\n result = main_evaluate(expression)\n assert result.error_type == SPACE_IN_NUMBER\n\n\ndef test_illegal_char_validation():\n expr = '454#f'\n validate = math_validations(expr)\n assert validate.error_type == ILLEGAL_CHAR\n\n\ndef test_unnecessary_brackets_validation():\n expr = '3^((4+4))'\n validate = math_validations(expr)\n assert validate.error_type == UNNECESSARY_PARENTHESES\n\n\n<mask token>\n\n\ndef test_opener_has_no_closer():\n expr = '((65+6)/6+(4+8/3'\n 
validate = math_validations(expr)\n assert validate.error_type == BRACKETS_ERROR\n\n\ndef test_closer_has_no_opener():\n expr = '(4+5)+9)+25^4'\n validate = math_validations(expr)\n assert validate.error_type == BRACKETS_ERROR\n\n\ndef test_last_token_pre_unary():\n expr = '4!+~'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_last_token_binary_operator():\n expr = '4!+'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_double_dot_validation():\n expr = '4!+7..7'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef test_first_dot_validation():\n expr = '.5+45*(65/7)'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef test_dot_after_operator_validation():\n expr = '45+.5'\n validate = math_validations(expr)\n assert validate.error_type == DOT_ERROR\n\n\ndef test_valid_dot():\n expr = '45+0.5'\n result = main_evaluate(expr)\n assert result == 45.5\n\n\ndef test_no_fraction_after_dot():\n expr = '8.*2'\n result = main_evaluate(expr)\n assert result == 16\n\n\ndef test_tilda_before_minus():\n expr = '~~-(70)'\n result = main_evaluate(expr)\n assert result == -70\n\n\ndef test_pre_unary_in_a_row():\n expr = '~~~2'\n result = main_evaluate(expr)\n assert result == -2\n\n\ndef test_pre_unary_with_minuses():\n expr = '~-~--~-10'\n result = main_evaluate(expr)\n assert result == -10\n\n\ndef test_post_unary_in_a_row():\n expr = '3!!+4'\n result = main_evaluate(expr)\n assert result == 724\n\n\ndef test_post_unary_on_brackets():\n expr = '(1+5&8$3)!+4'\n result = main_evaluate(expr)\n assert result == 724\n",
"step-5": "\"\"\"\n test_extra.py:\n In this file i wrote extra tests to my calculator program.\n Divided to some main parts:\n - Math Errors Tests (divide in zero, factorial, complex numbers)\n - Test with edge cases of minus (operator / sign)\n - Big results tests: expression that their result\n will be inf or cause overflow exception\n - test spaces in expressions\n - test to the tokens_validation.py functions:\n valid brackets, unnecessary parentheses, last token validation\n - Decimal point place in expressions\n - pre and post unary operations\n\"\"\"\n\nfrom calculator_main_omega import *\nfrom errors import *\n\n\n# Math Errors Tests\n\ndef test_divide_in_zero_from_start():\n # test divide in zero in case that\n # we can see before solving\n expression = '56/0'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_divide_in_zero_while_solve():\n # test divide in zero in case that\n # we can't see before solving\n expression = '56/(5-5)'\n result = main_evaluate(expression)\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_mod_in_zero():\n expression = '-3%0'\n result = main_evaluate(expression)\n # this result need to be none because we mod in zero\n assert result.error_type == DIVIDE_ZERO\n\n\ndef test_complex_number():\n expression = '(-7)^0.5'\n result = main_evaluate(expression)\n # check if get COMPLEX_ERROR when get complex result\n assert result.error_type == COMPLEX_ERROR\n\n\ndef test_factorial_negative():\n expression = '(-9)!'\n result = main_evaluate(expression)\n # the factorial operation on negative numbers is not legal\n assert result.error_type == FACTORIAL_ERROR\n\n\ndef test_factorial_not_round():\n expression = '2.404!+34'\n result = main_evaluate(expression)\n # the factorial operation on fraction numbers is not legal\n assert result.error_type == FACTORIAL_ERROR\n\n\ndef test_factorial_huge_number():\n expression = '600000!+4'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\n# Minus tests:\n\ndef test_minus_start():\n # build expression that have '-' in the start\n expression = '-2^3'\n result = main_evaluate(expression)\n assert result == -8\n\n\ndef test_minus_after_binary():\n # test expression with minus after binary operator\n expression = '5*-2'\n result = main_evaluate(expression)\n assert result == -10\n\n\ndef test_minuses_row():\n # test expression with some minuses right after each other\n expression = '---(4+2)+8----8'\n result = main_evaluate(expression)\n assert result == 10\n\n\ndef test_huge_equation():\n expression = ('1+' * 10000) + '0'\n # i build expression with 10000 operators\n # i test this affect on my program\n result = main_evaluate(expression)\n assert result == 10000\n\n\ndef test_max_size_expression():\n # build expression with size bigger then the\n # MAX_EXPRESSION_SIZE\n expression = '5' * (MAX_EXPRESSION_SIZE + 1)\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\n# Big results tests:\n\ndef test_pow_overflow():\n expression = '225^225.6'\n result = main_evaluate(expression)\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_multiply_overflow():\n expression = '170!*444444'\n result = main_evaluate(expression)\n # the result of this expression is too big to store in float\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_minus_inf_number():\n expression = '-67675765675675675675897333333333' \\\n '09876767565656756745345543333335' \\\n '67567563453423423423436546333337' \\\n 
'47646767567576575675756756733335' \\\n '76578867864564534535423423413533' \\\n '32523523525235235235235352352433' \\\n '12412413523523535235241241241231' \\\n '24124421874126512561275126571323' \\\n '52352353523524124124121241244218' \\\n '52352353523524124124121241244218' \\\n '52352353523524124124121241244218' \\\n '52352353523524124124121241244218'\\\n\n result = main_evaluate(expression)\n # python store it in float('inf'),\n # i test here if my program handle with that\n assert result.error_type == MEMORY_EXCEPTION\n\n\ndef test_plus_inf_number():\n expression = '67675765675675675675897333333333' \\\n '09876767565656756745345543333335' \\\n '67567563453423423423436546333337' \\\n '47646767567576575675756756733335' \\\n '76578867864564534535423423413533' \\\n '32523523525235235235235352352433' \\\n '12412413523523535235241241241231' \\\n '24124421874126512561275126571323' \\\n '52352353523524124124121241244218' \\\n '52352353523524124124121241244218' \\\n '52352353523524124124121241244218' \\\n '52352353523524124124121241244218'\\\n\n result = main_evaluate(expression)\n # python store it in float('inf'),\n # i test here if my program handle with that\n assert result.error_type == MEMORY_EXCEPTION\n\n\n# Space Test:\n\ndef test_space_inside_number():\n # we have illegal space inside the number '47'\n # in the real interpreter in python space inside\n # a expression is invalid syntax\n expression = '5*1^4+4 7+5'\n result = main_evaluate(expression)\n assert result.error_type == SPACE_IN_NUMBER\n\n\n# Expression validations tests:\n# Test to the validations that i do to\n# expression before building the token list\n\n\ndef test_illegal_char_validation():\n expr = '454#f'\n validate = math_validations(expr)\n assert validate.error_type == ILLEGAL_CHAR\n\n\ndef test_unnecessary_brackets_validation():\n expr = '3^((4+4))'\n validate = math_validations(expr)\n # the double brackets around simple expression like: 4+4\n # is not legal do check if my calculator recognize it before solving\n assert validate.error_type == UNNECESSARY_PARENTHESES\n\n\ndef test_only_number_in_brackets():\n expr = '(6)'\n validate = math_validations(expr)\n # test only number in brackets\n assert validate is True\n\n\ndef test_opener_has_no_closer():\n expr = '((65+6)/6+(4+8/3'\n validate = math_validations(expr)\n # error that say the one of the opener '(' has no ')'\n assert validate.error_type == BRACKETS_ERROR\n\n\ndef test_closer_has_no_opener():\n expr = '(4+5)+9)+25^4'\n validate = math_validations(expr)\n # error that say the one of the closer ')' has no '(' matched\n assert validate.error_type == BRACKETS_ERROR\n\n\ndef test_last_token_pre_unary():\n expr = '4!+~'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\ndef test_last_token_binary_operator():\n expr = '4!+'\n validate = math_validations(expr)\n assert validate.error_type == LAST_TOKEN_ERROR\n\n\n# Test to the use of decimal point in expression\n\ndef test_double_dot_validation():\n expr = '4!+7..7'\n validate = math_validations(expr)\n # dot can't be after dot, because of that the specific error\n # will be 'dot after error'\n assert validate.error_type == DOT_ERROR\n\n\ndef test_first_dot_validation():\n expr = '.5+45*(65/7)'\n validate = math_validations(expr)\n # dot can't be the first char in expression\n assert validate.error_type == DOT_ERROR\n\n\ndef test_dot_after_operator_validation():\n expr = '45+.5'\n validate = math_validations(expr)\n # dot can't be after operator\n assert validate.error_type 
== DOT_ERROR\n\n\ndef test_valid_dot():\n expr = '45+0.5'\n result = main_evaluate(expr)\n assert result == 45.5\n\n\ndef test_no_fraction_after_dot():\n expr = '8.*2'\n # i decided to support expressions\n # like that like the real python interpreter\n result = main_evaluate(expr)\n assert result == 16\n\n\n# Pre Unary Tests:\ndef test_tilda_before_minus():\n expr = '~~-(70)'\n result = main_evaluate(expr)\n assert result == -70\n\n\ndef test_pre_unary_in_a_row():\n expr = '~~~2'\n result = main_evaluate(expr)\n assert result == -2\n\n\ndef test_pre_unary_with_minuses():\n expr = '~-~--~-10'\n result = main_evaluate(expr)\n assert result == -10\n\n\n# Post Unary Tests:\ndef test_post_unary_in_a_row():\n expr = '3!!+4'\n result = main_evaluate(expr)\n assert result == 724\n\n\ndef test_post_unary_on_brackets():\n expr = '(1+5&8$3)!+4'\n result = main_evaluate(expr)\n assert result == 724\n",
"step-ids": [
16,
25,
26,
33,
36
]
}
|
[
16,
25,
26,
33,
36
] |
import time
import math
from random import randrange
import multilineMAX7219 as LEDMatrix
from multilineMAX7219_fonts import CP437_FONT, SINCLAIRS_FONT, LCD_FONT, TINY_FONT
from multilineMAX7219 import DIR_L, DIR_R, DIR_U, DIR_D
from multilineMAX7219 import DIR_LU, DIR_RU, DIR_LD, DIR_RD
from multilineMAX7219 import DISSOLVE, GFX_ON, GFX_OFF, GFX_INVERT
import datetime,ephem
from myfont import f
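# utlst(): current UT and local sidereal time for a fixed observer
# (apparently the GTC telescope site on La Palma, judging by the hard-coded
# coordinates). ephem renders observer.date as "YYYY/M/D HH:MM:SS", so
# splitting the combined "date UT LST" string on spaces gives p[1] = UT and
# p[2] = LST; the fractional seconds of the LST are trimmed at the '.'.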
def utlst():
gtc = ephem.Observer()
gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0
t = "%s %s" % (gtc.date,gtc.sidereal_time())
p = t.split(" ")
lst=p[2].split(".")
ut=p[1]
return ut,lst[0]
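# at(): draw a string at (x, y) one glyph sprite at a time using the custom
# font `f` (glyphs indexed from ASCII '0'); x advances by each glyph's
# width, with manual kerning tweaks for ':' and for letters.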
def at(x,y,string,state=GFX_ON):
for c in string:
LEDMatrix.gfx_sprite_array(f[ord(c)-48],x,y,state)
x+=len(f[ord(c)-48][0])
if c == ":" : x-=7
if c >= "A" : x-=1
# Initialise the library and the MAX7219/8x8LED arrays
LEDMatrix.init()
LEDMatrix.brightness(5)
sun, moon = ephem.Sun(), ephem.Moon()
gtc = ephem.Observer()
gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0
print gtc.date, gtc.sidereal_time()
print gtc.lon, gtc.lat
try:
while 1:
ut,lst=utlst()
sut="%s" % ut
slst="%s" % lst
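        # zero-pad the sidereal time so the display stays a fixed hh:mm:ss width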
if len(slst) < 8: slst = "0"+slst
at(0,16,"UT%s" % sut)
at(0, 0,"ST%s" % slst)
LEDMatrix.gfx_render()
time.sleep(0.1)
except KeyboardInterrupt:
# reset array
LEDMatrix.clear_all()
|
normal
|
{
"blob_id": "ba486b64b1da3dc1775bee0980d5236516e130d4",
"index": 4033,
"step-1": "import time\nimport math\nfrom random import randrange\n\nimport multilineMAX7219 as LEDMatrix\nfrom multilineMAX7219_fonts import CP437_FONT, SINCLAIRS_FONT, LCD_FONT, TINY_FONT\nfrom multilineMAX7219 import DIR_L, DIR_R, DIR_U, DIR_D\nfrom multilineMAX7219 import DIR_LU, DIR_RU, DIR_LD, DIR_RD\nfrom multilineMAX7219 import DISSOLVE, GFX_ON, GFX_OFF, GFX_INVERT\nimport datetime,ephem\nfrom myfont import f\n\ndef utlst():\n gtc = ephem.Observer()\n gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0\n t = \"%s %s\" % (gtc.date,gtc.sidereal_time())\n p = t.split(\" \")\n lst=p[2].split(\".\")\n ut=p[1]\n return ut,lst[0]\n\ndef at(x,y,string,state=GFX_ON):\n for c in string:\n LEDMatrix.gfx_sprite_array(f[ord(c)-48],x,y,state)\n x+=len(f[ord(c)-48][0])\n if c == \":\" : x-=7\n if c >= \"A\" : x-=1\n\n\n\n# Initialise the library and the MAX7219/8x8LED arrays\nLEDMatrix.init()\nLEDMatrix.brightness(5)\nsun, moon = ephem.Sun(), ephem.Moon()\n\ngtc = ephem.Observer()\ngtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0\nprint gtc.date, gtc.sidereal_time()\nprint gtc.lon, gtc.lat\n\ntry:\n while 1:\n ut,lst=utlst()\n sut=\"%s\" % ut\n slst=\"%s\" % lst \n if len(slst) < 8: slst = \"0\"+slst\n at(0,16,\"UT%s\" % sut)\n at(0, 0,\"ST%s\" % slst)\n LEDMatrix.gfx_render()\n time.sleep(0.1)\n\nexcept KeyboardInterrupt:\n # reset array\n LEDMatrix.clear_all()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Problem Statement
You and Fredrick are good friends. Yesterday, Fredrick received N credit
cards from ABCD Bank. He wants to verify whether his credit card numbers are
valid or not. You happen to be great at regex so he is asking for your help!
A valid credit card from ABCD Bank has the following characteristics:
It must start with a 4, 5 or 6.
It must contain exactly 16 digits.
It must only consist of digits (0-9).
It may have digits in groups of 4, separated by one hyphen "-".
It must NOT use any other separator like ' ' , '_', etc.
It must NOT have 4 or more consecutive repeated digits.
Examples:
Valid Credit Card Numbers
---------------------------
4253625879615786
4424424424442444
5122-2368-7954-3214
Invalid Credit Card Numbers
---------------------------
42536258796157867 #17 digits in card number --> Invalid
4424444424442444 #Consecutive digits are repeating 4 or more times
--> Invalid
5122-2368-7954 - 3214 #Separators other than '-' are used --> Invalid
44244x4424442444 #Contains non digit characters --> Invalid
0525362587961578 #Doesn't start with 4, 5 or 6 --> Invalid
Input Format
The first line of input contains an integer N.
The next N lines contain credit card numbers.
Constraints
0<N<100
Output Format
Print 'Valid' if the credit card number is valid. Otherwise,
print 'Invalid'. Do not print the quotes.
Sample Input
------------
6
4123456789123456
5123-4567-8912-3456
61234-567-8912-3456
4123356789123456
5133-3367-8912-3456
5123 - 3567 - 8912 - 3456
Sample Output
------------
Valid
Valid
Invalid
Valid
Invalid
Invalid
Explanation
-----------
4123456789123456 : Valid
5123-4567-8912-3456 : Valid
61234-567-8912-3456 : Invalid, because the card number is not divided into
equal groups of 4.
4123356789123456 : Valid
5133-3367-8912-3456 : Invalid, consecutive digits 3333 is repeating 4 times.
5123 - 3567 - 8912 - 3456 : Invalid, because space ' ' and - are used as
separators.
"""
import re
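# First solution: manual structural checks (length and hyphen grouping),
# then two regexes: one for the leading 4/5/6 and one that matches any
# digit repeated four or more times in a row.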
for _ in range(int(raw_input())):
credit_card_number = raw_input()
if len(credit_card_number) == 16 or len(credit_card_number) == 19:
if credit_card_number.count('-') == 3 and len(
credit_card_number) != 19:
print "Invalid"
continue
if credit_card_number.count('-') == 3:
cc_split = credit_card_number.split('-')
is_invalid = False
for cc in cc_split:
if len(cc) != 4:
is_invalid = True
break
if is_invalid:
print "Invalid"
continue
credit_card_number = credit_card_number.replace('-', '')
#print credit_card_number
start_pattern = r"[456]"
digit_pattern = r"\d*([0-9])\1\1\1"
start_match = re.match(start_pattern, credit_card_number)
digit_match = re.match(digit_pattern, credit_card_number)
#print start_match, digit_match
if start_match and not digit_match:
print "Valid"
else:
print "Invalid"
else:
print "Invalid"
for i in range(int(raw_input())):
S = raw_input().strip()
pre_match = re.search(r'^[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}$',S)
if pre_match:
processed_string = "".join(pre_match.group(0).split('-'))
final_match = re.search(r'(\d)\1{3,}',processed_string)
print 'Invalid' if final_match else 'Valid'
else:
print 'Invalid'
|
normal
|
{
"blob_id": "09f2fabaf3c19aa0d4cb522c6dbf5fd8d720b4df",
"index": 1567,
"step-1": "\"\"\"\nProblem Statement\n\nYou and Fredrick are good friends. Yesterday, Fredrick received N credit\ncards from ABCD Bank. He wants to verify whether his credit card numbers are\nvalid or not. You happen to be great at regex so he is asking for your help!\n\nA valid credit card from ABCD Bank has the following characteristics:\n\n It must start with a 4, 5 or 6.\n It must contain exactly 16 digits.\n It must only consist of digits (0-9).\n It may have digits in groups of 4, separated by one hyphen \"-\".\n It must NOT use any other separator like ' ' , '_', etc.\n It must NOT have 4 or more consecutive repeated digits.\n\nExamples:\n\nValid Credit Card Numbers\n---------------------------\n4253625879615786\n4424424424442444\n5122-2368-7954-3214\n\nInvalid Credit Card Numbers\n---------------------------\n42536258796157867 #17 digits in card number --> Invalid\n4424444424442444 #Consecutive digits are repeating 4 or more times\n--> Invalid\n5122-2368-7954 - 3214 #Separators other than '-' are used --> Invalid\n44244x4424442444 #Contains non digit characters --> Invalid\n0525362587961578 #Doesn't start with 4, 5 or 6 --> Invalid\n\nInput Format\n\nThe first line of input contains an integer N.\nThe next N lines contain credit card numbers.\n\nConstraints\n\n0<N<100\nOutput Format\n\nPrint 'Valid' if the credit card number is valid. Otherwise,\nprint 'Invalid'. Do not print the quotes.\n\nSample Input\n------------\n6\n4123456789123456\n5123-4567-8912-3456\n61234-567-8912-3456\n4123356789123456\n5133-3367-8912-3456\n5123 - 3567 - 8912 - 3456\n\nSample Output\n------------\nValid\nValid\nInvalid\nValid\nInvalid\nInvalid\n\nExplanation\n-----------\n4123456789123456 : Valid\n5123-4567-8912-3456 : Valid\n61234-567-8912-3456 : Invalid, because the card number is not divided into\nequal groups of 4.\n4123356789123456 : Valid\n5133-3367-8912-3456 : Invalid, consecutive digits 3333 is repeating 4 times.\n5123 - 4567 - 8912 - 3456 : Invalid, because space ' ' and - are used as\nseparators.\n\"\"\"\nimport re\n\nfor _ in range(int(raw_input())):\n credit_card_number = raw_input()\n if len(credit_card_number) == 16 or len(credit_card_number) == 19:\n if credit_card_number.count('-') == 3 and len(\n credit_card_number) != 19:\n print \"Invalid\"\n continue\n if credit_card_number.count('-') == 3:\n cc_split = credit_card_number.split('-')\n is_invalid = False\n for cc in cc_split:\n if len(cc) != 4:\n is_invalid = True\n break\n if is_invalid:\n print \"Invalid\"\n continue\n credit_card_number = credit_card_number.replace('-', '')\n #print credit_card_number\n start_pattern = r\"[456]\"\n digit_pattern = r\"\\d*([0-9])\\1\\1\\1\"\n start_match = re.match(start_pattern, credit_card_number)\n digit_match = re.match(digit_pattern, credit_card_number)\n #print start_match, digit_match\n if start_match and not digit_match:\n print \"Valid\"\n else:\n print \"Invalid\"\n else:\n print \"Invalid\"\n\nfor i in range(int(raw_input())):\n S = raw_input().strip()\n pre_match = re.search(r'^[456]\\d{3}(-?)\\d{4}\\1\\d{4}\\1\\d{4}$',S)\n if pre_match:\n processed_string = \"\".join(pre_match.group(0).split('-'))\n final_match = re.search(r'(\\d)\\1{3,}',processed_string)\n print 'Invalid' if final_match else 'Valid'\n else:\n print 'Invalid'",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding: utf-8
'''
 Program  : py02_variavel.py
 Homepage : http://www
 Author   : Helber Palheta <[email protected]>
 Execution:
    python py02_variavel.py
'''
#the variable `curso` (course) and its assignment
curso = "Introdução a Biopython!"
#print function
print("Nome do Curso: "+curso)
|
normal
|
{
"blob_id": "ad59c1f0038294144b1c63db5f048b0a6b5ebb89",
"index": 4654,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Nome do Curso: ' + curso)\n",
"step-3": "<mask token>\ncurso = 'Introdução a Biopython!'\nprint('Nome do Curso: ' + curso)\n",
"step-4": "# coding: utf-8\n'''\n \n Programa : py02_variavel.py\n Homepage : http://www\n Autor : Helber Palheta <[email protected]>\n\n Execução:\n python py02_variavel.py\n\n''' \n#variável curso e sua atribuição\ncurso = \"Introdução a Biopython!\"\n\n#função print\nprint(\"Nome do Curso: \"+curso)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class UnLoginGetArticleJsonTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('loaddata', 'fixtures/auth.json', verbosity=0)
call_command('loaddata', 'fixtures/dump.json', verbosity=0)
def test_article_success_data(self):
url = reverse('api_v1:articles')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('description', data[0])
self.assertIn('category_id', data[0])
self.assertIn('user_id', data[0])
self.assertIn('image', data[0])
def test_get_main_category_json_data(self):
url = reverse('api_v1:main_categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
def test_get_json_category_success_data(self):
url = reverse('api_v1:categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('parent_id', data[0])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetJsonData(TestCase):
<|reserved_special_token_0|>
def check_redirect(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response), JsonResponse)
class UnLoginGetArticleJsonTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('loaddata', 'fixtures/auth.json', verbosity=0)
call_command('loaddata', 'fixtures/dump.json', verbosity=0)
def test_article_success_data(self):
url = reverse('api_v1:articles')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('description', data[0])
self.assertIn('category_id', data[0])
self.assertIn('user_id', data[0])
self.assertIn('image', data[0])
def test_get_main_category_json_data(self):
url = reverse('api_v1:main_categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
def test_get_json_category_success_data(self):
url = reverse('api_v1:categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('parent_id', data[0])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetJsonData(TestCase):
def test_post_not_login_no_pk(self):
for url in URLS:
response = self.client.get(reverse(url))
self.check_redirect(response)
def check_redirect(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response), JsonResponse)
class UnLoginGetArticleJsonTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('loaddata', 'fixtures/auth.json', verbosity=0)
call_command('loaddata', 'fixtures/dump.json', verbosity=0)
def test_article_success_data(self):
url = reverse('api_v1:articles')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('description', data[0])
self.assertIn('category_id', data[0])
self.assertIn('user_id', data[0])
self.assertIn('image', data[0])
def test_get_main_category_json_data(self):
url = reverse('api_v1:main_categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
def test_get_json_category_success_data(self):
url = reverse('api_v1:categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('parent_id', data[0])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
URLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']
class GetJsonData(TestCase):
def test_post_not_login_no_pk(self):
for url in URLS:
response = self.client.get(reverse(url))
self.check_redirect(response)
def check_redirect(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response), JsonResponse)
class UnLoginGetArticleJsonTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
call_command('loaddata', 'fixtures/auth.json', verbosity=0)
call_command('loaddata', 'fixtures/dump.json', verbosity=0)
def test_article_success_data(self):
url = reverse('api_v1:articles')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('description', data[0])
self.assertIn('category_id', data[0])
self.assertIn('user_id', data[0])
self.assertIn('image', data[0])
def test_get_main_category_json_data(self):
url = reverse('api_v1:main_categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
def test_get_json_category_success_data(self):
url = reverse('api_v1:categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('parent_id', data[0])
<|reserved_special_token_1|>
import json
from django.core.management import call_command
from django.http import JsonResponse
from django.test import TestCase
from django.urls import reverse
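# Named URL patterns that must return JSON even for anonymous users.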
URLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']
class GetJsonData(TestCase):
def test_post_not_login_no_pk(self):
for url in URLS:
response = self.client.get(reverse(url))
self.check_redirect(response)
def check_redirect(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response), JsonResponse)
class UnLoginGetArticleJsonTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
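        # load fixture users and sample content once for the whole TestCase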
call_command('loaddata', 'fixtures/auth.json', verbosity=0)
call_command('loaddata', 'fixtures/dump.json', verbosity=0)
def test_article_success_data(self):
url = reverse('api_v1:articles')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('description', data[0])
self.assertIn('category_id', data[0])
self.assertIn('user_id', data[0])
self.assertIn('image', data[0])
def test_get_main_category_json_data(self):
url = reverse('api_v1:main_categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
def test_get_json_category_success_data(self):
url = reverse('api_v1:categories')
self.response = self.client.get(url)
data = json.loads(self.response.content)
self.assertTrue(len(data) >= 1)
self.assertIn('pk', data[0])
self.assertIn('title', data[0])
self.assertIn('parent_id', data[0])
|
flexible
|
{
"blob_id": "676caabb103f67c631bc191b11ab0d2d8ab25d1e",
"index": 5803,
"step-1": "<mask token>\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-2": "<mask token>\n\n\nclass GetJsonData(TestCase):\n <mask token>\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-3": "<mask token>\n\n\nclass GetJsonData(TestCase):\n\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-4": "<mask token>\nURLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']\n\n\nclass GetJsonData(TestCase):\n\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-5": "import json\n\nfrom django.core.management import call_command\nfrom django.http import JsonResponse\nfrom django.test import TestCase\nfrom django.urls import reverse\n\n\nURLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']\n\n\nclass GetJsonData(TestCase):\n def test_post_not_login_no_pk(self):\n for url in URLS:\n response = self.client.get(reverse(url))\n self.check_redirect(response)\n\n def check_redirect(self, response):\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response), JsonResponse)\n\n\nclass UnLoginGetArticleJsonTestCase(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n call_command('loaddata', 'fixtures/auth.json', verbosity=0)\n call_command('loaddata', 'fixtures/dump.json', verbosity=0)\n\n def test_article_success_data(self):\n url = reverse('api_v1:articles')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('description', data[0])\n self.assertIn('category_id', data[0])\n self.assertIn('user_id', data[0])\n self.assertIn('image', data[0])\n\n def test_get_main_category_json_data(self):\n url = reverse('api_v1:main_categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n\n def test_get_json_category_success_data(self):\n url = reverse('api_v1:categories')\n self.response = self.client.get(url)\n data = json.loads(self.response.content)\n self.assertTrue(len(data) >= 1)\n self.assertIn('pk', data[0])\n self.assertIn('title', data[0])\n self.assertIn('parent_id', data[0])\n",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
#!/usr/bin/python
import sys
import cgi
import urllib2
url = sys.argv[1]
try:
response = urllib2.urlopen(url)
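	# geturl() reports the final URL after urllib2 has followed any redirects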
redir = response.geturl()
except Exception:
import traceback
redir = 'generic exception: ' + traceback.format_exc()
print redir
|
normal
|
{
"blob_id": "3ff3b8a1d8e74c09da9d6f39e4abf0963002a812",
"index": 5682,
"step-1": "#!/usr/bin/python\nimport sys\nimport cgi\nimport urllib2\n\n\nurl = sys.argv[1]\n\ntry:\n\tresponse = urllib2.urlopen(url)\n\tredir = response.geturl()\nexcept Exception:\n\timport traceback\n\tredir = 'generic exception: ' + traceback.format_exc()\n\nprint redir\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform_nrf(form):
"""
Renders given form with required fields marked.
@param form:
@return:
"""
return {'form': form, 'required_fields': False}
@register.inclusion_tag('tags/sendForm.html')
def show_sendform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
<|reserved_special_token_0|>
@register.inclusion_tag('tags/accountForm.html')
def show_accountform(form, is_superuser):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': False, 'is_superuser':
is_superuser}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform_nrf(form):
"""
Renders given form with required fields marked.
@param form:
@return:
"""
return {'form': form, 'required_fields': False}
@register.inclusion_tag('tags/sendForm.html')
def show_sendform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/loginForm.html')
def show_loginform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/accountForm.html')
def show_accountform(form, is_superuser):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': False, 'is_superuser':
is_superuser}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
register = template.Library()
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform_nrf(form):
"""
Renders given form with required fields marked.
@param form:
@return:
"""
return {'form': form, 'required_fields': False}
@register.inclusion_tag('tags/sendForm.html')
def show_sendform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/loginForm.html')
def show_loginform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/accountForm.html')
def show_accountform(form, is_superuser):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': False, 'is_superuser':
is_superuser}
<|reserved_special_token_1|>
from django import template
register = template.Library()
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform_nrf(form):
"""
Renders given form with required fields marked.
@param form:
@return:
"""
return {'form': form, 'required_fields': False}
@register.inclusion_tag('tags/sendForm.html')
def show_sendform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/loginForm.html')
def show_loginform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/accountForm.html')
def show_accountform(form, is_superuser):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': False, 'is_superuser':
is_superuser}
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
from django import template
register = template.Library()
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform_nrf(form):
"""
Renders given form with required fields marked.
@param form:
@return:
"""
return {'form': form, 'required_fields': False}
@register.inclusion_tag('tags/sendForm.html')
def show_sendform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/loginForm.html')
def show_loginform(form):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': True}
@register.inclusion_tag('tags/accountForm.html')
def show_accountform(form, is_superuser):
"""
Renders given form without marking required fields.
@param form:
@return:
"""
return {'form': form, 'required_fields': False, 'is_superuser': is_superuser}
|
flexible
|
{
"blob_id": "9f2105d188ac32a9eef31b21065e9bda13a02995",
"index": 6735,
"step-1": "<mask token>\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\n<mask token>\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-2": "<mask token>\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-4": "from django import template\nregister = template.Library()\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser':\n is_superuser}\n",
"step-5": "# -*- coding: utf-8 -*-\n# @COPYRIGHT_begin\n#\n# Copyright [2015] Michał Szczygieł, M4GiK Software\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @COPYRIGHT_end\nfrom django import template\n\n\nregister = template.Library()\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/fieldsetForm.html')\ndef show_fieldsetform_nrf(form):\n \"\"\"\n Renders given form with required fields marked.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False}\n\n\[email protected]_tag('tags/sendForm.html')\ndef show_sendform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/loginForm.html')\ndef show_loginform(form):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': True}\n\n\[email protected]_tag('tags/accountForm.html')\ndef show_accountform(form, is_superuser):\n \"\"\"\n Renders given form without marking required fields.\n @param form:\n @return:\n \"\"\"\n return {'form': form, 'required_fields': False, 'is_superuser': is_superuser}\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
# !/usr/bin/env python3
import pathlib
from PIL import Image
if __name__ == '__main__':
img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()
# image load
with Image.open(str(img_path)) as img:
# image info
print('IMAGE: {}'.format(str(img_path)))
print('Image is in {} format'.format(img.format))
print('Image size: width {} pixels, height {} pixels'.format(img.size[0], img.size[1]))
print('Image color bands: {}'.format(img.mode))
# image display
img.show()
|
normal
|
{
"blob_id": "05edbf3662936465eee8eee0824d1a0cca0df0e5",
"index": 4855,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()\n with Image.open(str(img_path)) as img:\n print('IMAGE: {}'.format(str(img_path)))\n print('Image is in {} format'.format(img.format))\n print('Image size: width {} pixels, height {} pixels'.format(img.\n size[0], img.size[1]))\n print('Image color bands: {}'.format(img.mode))\n img.show()\n",
"step-3": "import pathlib\nfrom PIL import Image\nif __name__ == '__main__':\n img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()\n with Image.open(str(img_path)) as img:\n print('IMAGE: {}'.format(str(img_path)))\n print('Image is in {} format'.format(img.format))\n print('Image size: width {} pixels, height {} pixels'.format(img.\n size[0], img.size[1]))\n print('Image color bands: {}'.format(img.mode))\n img.show()\n",
"step-4": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3\n\nimport pathlib\nfrom PIL import Image\n\n\nif __name__ == '__main__':\n\n img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()\n\n # image load\n with Image.open(str(img_path)) as img:\n # image info\n print('IMAGE: {}'.format(str(img_path)))\n print('Image is in {} format'.format(img.format))\n print('Image size: width {} pixels, height {} pixels'.format(img.size[0], img.size[1]))\n print('Image color bands: {}'.format(img.mode))\n # image display\n img.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# $Header: //depot/cs/s/ajax_support.wsgi#10 $
from werkzeug.wrappers import Response
from p.DRequest import DRequest
from db.Support import SupportSession
from db.Exceptions import DbError, SupportSessionExpired
import db.Db as Db
import db.Support
import cgi
import simplejson as json
def application(environ, start_response):
"""AJAX scripts for email templates."""
request = DRequest(environ)
resp = None
try :
Db.start_transaction()
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
args = form['args'].value
req = json.loads(args)
support = SupportSession(key=request.support_key())
handler = handlers[req['command']]
resp = Response(json.dumps(handler(request, req)))
Db.finish_transaction()
except SupportSessionExpired:
Db.cancel_transaction()
resp = Response(json.dumps({ 'Error': 'Session Expired' }))
except DbError as e:
Db.cancel_transaction()
resp = Response(json.dumps({ 'Error': e.args[0]}))
except Exception as e:
Db.cancel_transaction()
import traceback
traceback.print_exc()
resp = Response(json.dumps({ 'Error': "Internal Error"}))
request.cookie_freshen(resp)
resp.headers['content-type'] = 'application/json'
resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'
return resp(environ, start_response)
def get(request, req):
return db.Support.get_all()
def edit(request, req):
return db.Support.edit(req);
def delete(request, req):
return db.Support.delete(req['support_id'])
def add(request, req):
return db.Support.new()
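# dispatch table mapping the JSON 'command' field to its handler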
handlers = { 'get': get, 'edit': edit, 'delete': delete, 'add': add }
|
normal
|
{
"blob_id": "be58862b66708c9de8cf7642c9de52ec744b079e",
"index": 805,
"step-1": "<mask token>\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\n<mask token>\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\ndef edit(request, req):\n return db.Support.edit(req)\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\ndef edit(request, req):\n return db.Support.edit(req)\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\nhandlers = {'get': get, 'edit': edit, 'delete': delete, 'add': add}\n",
"step-4": "from werkzeug.wrappers import Response\nfrom p.DRequest import DRequest\nfrom db.Support import SupportSession\nfrom db.Exceptions import DbError, SupportSessionExpired\nimport db.Db as Db\nimport db.Support\nimport cgi\nimport simplejson as json\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\ndef edit(request, req):\n return db.Support.edit(req)\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\nhandlers = {'get': get, 'edit': edit, 'delete': delete, 'add': add}\n",
"step-5": "# $Header: //depot/cs/s/ajax_support.wsgi#10 $\nfrom werkzeug.wrappers import Response\nfrom p.DRequest import DRequest\nfrom db.Support import SupportSession\nfrom db.Exceptions import DbError, SupportSessionExpired\nimport db.Db as Db\nimport db.Support\n\nimport cgi\nimport simplejson as json\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n\n request = DRequest(environ)\n\n resp = None\n\n try :\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({ 'Error': 'Session Expired' }))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({ 'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({ 'Error': \"Internal Error\"}))\n\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\ndef edit(request, req):\n return db.Support.edit(req);\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\ndef add(request, req):\n return db.Support.new()\n\n\nhandlers = { 'get': get, 'edit': edit, 'delete': delete, 'add': add }\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import tensorflow as tf
from tensorflow.python.framework import graph_util
from net import siameseNet_batchnorm as siameseNet
import dataset
import numpy as np
import cv2
import os
batch_size=64
input_height=32
input_width=32
total_epoch_num=50
snapshot=100
support_image_extensions=[".jpg",".png",".jpeg",".bmp"]
margin=1.0
channals=3
train_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/train"
test_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/test"
model_path="models/"
pb_path=os.path.join(model_path,"pb/")
ckpt_path=os.path.join(model_path,"ckpt/")
if not os.path.exists(pb_path):
os.makedirs(pb_path)
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
model_name="siamese_triplet_28out_allloss_bn"
if __name__ == '__main__':
# image_paths,labels=get_images_path(test_image_root)
# data=next_batch(True,None,image_paths,labels)
# for left,right,label in zip(*data):
# cv2.imshow("left",left)
# cv2.imshow("right", right)
# print(label)
# cv2.waitKey(0)
first_shape=None
anchor_placeholder = tf.placeholder(tf.float32,shape=[first_shape,input_height,input_width,channals],name="anchor")
similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="similar")
dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="dissimilar")
labels_placeholder = tf.placeholder(tf.float32, shape=
[None if first_shape is None else first_shape * 3, ], name="labels")
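    # placeholder_with_default keeps is_training False unless the feed_dict
    # overrides it, so batch norm switches between batch and moving statistics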
is_training_placeholder = tf.placeholder_with_default(False, shape=(), name="is_training")
siamese_net=siameseNet.siameseNet()
anchor = siamese_net.inference(anchor_placeholder,reuse=False,is_training=is_training_placeholder)
similar = siamese_net.inference(similar_placeholder,reuse=True,is_training=is_training_placeholder)
dissimilar = siamese_net.inference(dissimilar_placeholder,reuse=True,is_training=is_training_placeholder)
loss,pos_dist,neg_dist = siamese_net.loss(anchor,similar,dissimilar,labels_placeholder,margin)
flatten_out_anchor = tf.identity(anchor, name="flatten_anchor")
flatten_out_similar = tf.identity(similar, name="flatten_similar")
flatten_out_dissimilar = tf.identity(dissimilar, name="flatten_dissimilar")
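    # batch-norm moving mean/variance updates live in UPDATE_OPS; grouping them
    # with the train step below makes them run on every training iteration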
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
global_step = tf.Variable(0, trainable=False)
# learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.9)
# optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
with tf.control_dependencies([tf.group(*update_ops)]):
# train_step = optimizer.minimize(loss, global_step)
train_step = tf.train.MomentumOptimizer(0.01, 0.90).\
minimize(loss, global_step=global_step)
var_list = tf.trainable_variables()
if global_step is not None:
var_list.append(global_step)
    g_list = tf.global_variables() # get batch norm's moving mean/variance from the global variables
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list += bn_moving_vars
ckpt_saver = tf.train.Saver()
train_dataset = dataset.dataset(train_image_root,batch_size,support_image_extensions,
input_height,input_width,channals)
test_dataset = dataset.dataset(test_image_root, batch_size, support_image_extensions,
input_height, input_width, channals)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# if os.path.exists(os.path.join(ckpt_path, "checkpoint")):
# ckpt_saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))
total_iters_num = 0
for epoch_num in range(total_epoch_num):
train_images_num = train_dataset.sample_len
cur_epoch_iters_num = train_images_num // batch_size
for iters_num in range(cur_epoch_iters_num):
train_anchor, train_similar, train_dissimilar,train_labels = \
train_dataset.next_triplet_batch()
test_anchor, test_similar, test_dissimilar,test_labels = \
test_dataset.next_triplet_batch()
if train_anchor is None or test_anchor is None:
continue
train_dict = {anchor_placeholder: train_anchor,
similar_placeholder: train_similar,
dissimilar_placeholder: train_dissimilar,
labels_placeholder:train_labels,
is_training_placeholder:True}
test_dict = {anchor_placeholder: test_anchor,
similar_placeholder: test_similar,
dissimilar_placeholder: test_dissimilar,
labels_placeholder:test_labels,
is_training_placeholder: False}
_,_global_step=sess.run([train_step,global_step], feed_dict=train_dict)
anchor_out,similar_out,dissimilar_out = sess.run([
flatten_out_anchor,flatten_out_similar,flatten_out_dissimilar],
feed_dict=train_dict)
_train_loss,_train_pos_dist,_train_neg_dist = \
sess.run([loss,pos_dist,neg_dist], feed_dict=train_dict)
_test_loss,_test_pos_dist,_test_neg_dist =\
sess.run([loss,pos_dist,neg_dist], feed_dict=test_dict)
print("distance:",list(zip(_train_pos_dist.flatten(),_train_neg_dist.flatten()))[:5])
one_moving_meaning_show = "No mean or variance"
if len(bn_moving_vars) > 0:
one_moving_meaning = sess.graph.get_tensor_by_name(bn_moving_vars[0].name)
one_moving_meaning_show = "{}={}".\
format(bn_moving_vars[0].name,np.mean(one_moving_meaning.eval()))
print(one_moving_meaning_show)
show_text = "epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}".format \
(epoch_num, iters_num + 1, _global_step, _train_loss, "0.99", _test_loss)
print(show_text)
if _global_step % snapshot == 0:
                    # save PB (frozen graph)
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["flatten_anchor"])
save_model_name=model_name + "-" + str(_global_step) + ".pb"
with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw:
fw.write(constant_graph.SerializeToString())
                    # save CKPT (checkpoint)
ckpt_saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=total_iters_num)
print("Successfully saved model {}".format(save_model_name))
|
normal
|
{
"blob_id": "97bbb181cbc0f5bfbf0b2298133fc226b6217d91",
"index": 399,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n<mask token>\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, 
_train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n",
"step-3": "<mask token>\nbatch_size = 64\ninput_height = 32\ninput_width = 32\ntotal_epoch_num = 50\nsnapshot = 100\nsupport_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']\nmargin = 1.0\nchannals = 3\ntrain_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'\ntest_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/test'\nmodel_path = 'models/'\npb_path = os.path.join(model_path, 'pb/')\nckpt_path = os.path.join(model_path, 'ckpt/')\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name = 'siamese_triplet_28out_allloss_bn'\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: 
True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n",
"step-4": "import tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom net import siameseNet_batchnorm as siameseNet\nimport dataset\nimport numpy as np\nimport cv2\nimport os\nbatch_size = 64\ninput_height = 32\ninput_width = 32\ntotal_epoch_num = 50\nsnapshot = 100\nsupport_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']\nmargin = 1.0\nchannals = 3\ntrain_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'\ntest_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/test'\nmodel_path = 'models/'\npb_path = os.path.join(model_path, 'pb/')\nckpt_path = os.path.join(model_path, 'ckpt/')\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name = 'siamese_triplet_28out_allloss_bn'\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = 
{anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n",
"step-5": "import tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom net import siameseNet_batchnorm as siameseNet\nimport dataset\nimport numpy as np\nimport cv2\nimport os\n\nbatch_size=64\ninput_height=32\ninput_width=32\ntotal_epoch_num=50\nsnapshot=100\nsupport_image_extensions=[\".jpg\",\".png\",\".jpeg\",\".bmp\"]\nmargin=1.0\nchannals=3\n\ntrain_image_root=\"D:/forTensorflow/charRecTrain/forMyDNNCode/train\"\ntest_image_root=\"D:/forTensorflow/charRecTrain/forMyDNNCode/test\"\n\nmodel_path=\"models/\"\npb_path=os.path.join(model_path,\"pb/\")\nckpt_path=os.path.join(model_path,\"ckpt/\")\n\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name=\"siamese_triplet_28out_allloss_bn\"\n\nif __name__ == '__main__':\n # image_paths,labels=get_images_path(test_image_root)\n # data=next_batch(True,None,image_paths,labels)\n # for left,right,label in zip(*data):\n # cv2.imshow(\"left\",left)\n # cv2.imshow(\"right\", right)\n # print(label)\n # cv2.waitKey(0)\n\n first_shape=None\n anchor_placeholder = tf.placeholder(tf.float32,shape=[first_shape,input_height,input_width,channals],name=\"anchor\")\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name=\"similar\")\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name=\"dissimilar\")\n labels_placeholder = tf.placeholder(tf.float32, shape=\n [None if first_shape is None else first_shape * 3, ], name=\"labels\")\n is_training_placeholder = tf.placeholder_with_default(False, shape=(), name=\"is_training\")\n siamese_net=siameseNet.siameseNet()\n\n anchor = siamese_net.inference(anchor_placeholder,reuse=False,is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder,reuse=True,is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder,reuse=True,is_training=is_training_placeholder)\n loss,pos_dist,neg_dist = siamese_net.loss(anchor,similar,dissimilar,labels_placeholder,margin)\n\n flatten_out_anchor = tf.identity(anchor, name=\"flatten_anchor\")\n flatten_out_similar = tf.identity(similar, name=\"flatten_similar\")\n flatten_out_dissimilar = tf.identity(dissimilar, name=\"flatten_dissimilar\")\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n # learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.9)\n # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)\n\n with tf.control_dependencies([tf.group(*update_ops)]):\n # train_step = optimizer.minimize(loss, global_step)\n train_step = tf.train.MomentumOptimizer(0.01, 0.90).\\\n minimize(loss, global_step=global_step)\n\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables() # 从全局变量中获得batch norm的缩放和偏差\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root,batch_size,support_image_extensions,\n input_height,input_width,channals)\n\n test_dataset = dataset.dataset(test_image_root, batch_size, support_image_extensions,\n input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # if 
os.path.exists(os.path.join(ckpt_path, \"checkpoint\")):\n # ckpt_saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))\n\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n\n train_anchor, train_similar, train_dissimilar,train_labels = \\\n train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar,test_labels = \\\n test_dataset.next_triplet_batch()\n\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n\t\t\t\t\t\t\t labels_placeholder:train_labels,\n is_training_placeholder:True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n\t\t\t\t\t\t\t labels_placeholder:test_labels,\n is_training_placeholder: False}\n _,_global_step=sess.run([train_step,global_step], feed_dict=train_dict)\n\n anchor_out,similar_out,dissimilar_out = sess.run([\n flatten_out_anchor,flatten_out_similar,flatten_out_dissimilar],\n feed_dict=train_dict)\n\n _train_loss,_train_pos_dist,_train_neg_dist = \\\n sess.run([loss,pos_dist,neg_dist], feed_dict=train_dict)\n _test_loss,_test_pos_dist,_test_neg_dist =\\\n sess.run([loss,pos_dist,neg_dist], feed_dict=test_dict)\n\n print(\"distance:\",list(zip(_train_pos_dist.flatten(),_train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = \"No mean or variance\"\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(bn_moving_vars[0].name)\n one_moving_meaning_show = \"{}={}\".\\\n format(bn_moving_vars[0].name,np.mean(one_moving_meaning.eval()))\n\n print(one_moving_meaning_show)\n show_text = \"epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}\".format \\\n (epoch_num, iters_num + 1, _global_step, _train_loss, \"0.99\", _test_loss)\n print(show_text)\n\n if _global_step % snapshot == 0:\n # 保存PB\n constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, [\"flatten_anchor\"])\n save_model_name=model_name + \"-\" + str(_global_step) + \".pb\"\n with tf.gfile.FastGFile(pb_path + save_model_name, mode=\"wb\") as fw:\n fw.write(constant_graph.SerializeToString())\n # 保存CKPT\n ckpt_saver.save(sess, ckpt_path + model_name + \".ckpt\", global_step=total_iters_num)\n print(\"Successfully saved model {}\".format(save_model_name))\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def koodrinate(kraj, kraji):
for ime, x, y in kraji:
if ime == kraj:
return x, y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def koodrinate(kraj, kraji):
for ime, x, y in kraji:
if ime == kraj:
return x, y
<|reserved_special_token_0|>
def koodrinate(kraj, kraji):
return kraji.get(kraj)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def koodrinate(kraj, kraji):
for ime, x, y in kraji:
if ime == kraj:
return x, y
<|reserved_special_token_0|>
def koodrinate(kraj, kraji):
return kraji.get(kraj)
<|reserved_special_token_0|>
for c in napis:
vsota += vrednost.get(c, 0)
print(sum(vrednost.get(c, 0) for c in napis))
for c in napis:
if c == 'I':
vsota += 1
elif c == 'V':
vsota += 5
elif c == 'X':
vsota += 10
elif c == 'L':
vsota += 50
elif c == 'C':
vsota += 100
elif c == 'D':
vsota += 500
elif c == 'M':
vsota += 1000
<|reserved_special_token_1|>
def koodrinate(kraj, kraji):
for ime, x, y in kraji:
if ime == kraj:
return x, y
kraji = {'Brežice': (68.66, 7.04), 'Lenart': (85.2, 78.75), 'Rateče': (-
65.04, 70.04), 'Ljutomer': (111.26, 71.82), 'Rogaška Slatina': (71.0,
42.0), 'Ribnica': (7.1, -10.5), 'Dutovlje': (-56.8, -6.93), 'Lokve': (-
57.94, 19.32), 'Vinica': (43.81, -38.43), 'Brtonigla': (-71.0, -47.25),
'Kanal': (-71.0, 26.25), 'Črnomelj': (39.05, -27.93), 'Trbovlje': (
29.61, 35.07), 'Beltinci': (114.81, 80.54), 'Domžale': (-2.34, 31.5)}
def koodrinate(kraj, kraji):
return kraji.get(kraj)
napis = 'KRNEKI'
vrednost = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
vsota = 0
for c in napis:
vsota += vrednost.get(c, 0)
print(sum(vrednost.get(c, 0) for c in napis))
for c in napis:
if c == 'I':
vsota += 1
elif c == 'V':
vsota += 5
elif c == 'X':
vsota += 10
elif c == 'L':
vsota += 50
elif c == 'C':
vsota += 100
elif c == 'D':
vsota += 500
elif c == 'M':
vsota += 1000
<|reserved_special_token_1|>
def koodrinate(kraj, kraji):
for ime, x, y in kraji:
if ime == kraj:
return x, y
kraji = {
'Brežice': (68.66, 7.04),
'Lenart': (85.20, 78.75),
'Rateče': (-65.04, 70.04),
'Ljutomer': (111.26, 71.82),
'Rogaška Slatina': (71.00, 42.00),
'Ribnica': (7.10, -10.50),
'Dutovlje': (-56.80, -6.93),
'Lokve': (-57.94, 19.32),
'Vinica': (43.81, -38.43),
'Brtonigla': (-71.00, -47.25),
'Kanal': (-71.00, 26.25),
'Črnomelj': (39.05, -27.93),
'Trbovlje': (29.61, 35.07),
'Beltinci': (114.81, 80.54),
'Domžale': (-2.34, 31.50)
}
def koodrinate(kraj, kraji):
return kraji.get(kraj)
napis = "KRNEKI"
vrednost = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
vsota = 0
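# vrednost.get(c, 0) yields 0 for characters with no Roman value, so they are skipped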
for c in napis:
vsota += vrednost.get(c, 0)
print(sum(vrednost.get(c, 0) for c in napis))
for c in napis:
if c == "I":
vsota += 1
elif c == "V":
vsota += 5
elif c == "X":
vsota += 10
elif c == "L":
vsota += 50
elif c == "C":
vsota += 100
elif c == "D":
vsota += 500
elif c == "M":
vsota += 1000
|
flexible
|
{
"blob_id": "2cfc1bea6dd1571eff67c3f49b2a1899560c7ba7",
"index": 3469,
"step-1": "def koodrinate(kraj, kraji):\n for ime, x, y in kraji:\n if ime == kraj:\n return x, y\n\n\n<mask token>\n",
"step-2": "def koodrinate(kraj, kraji):\n for ime, x, y in kraji:\n if ime == kraj:\n return x, y\n\n\n<mask token>\n\n\ndef koodrinate(kraj, kraji):\n return kraji.get(kraj)\n\n\n<mask token>\n",
"step-3": "def koodrinate(kraj, kraji):\n for ime, x, y in kraji:\n if ime == kraj:\n return x, y\n\n\n<mask token>\n\n\ndef koodrinate(kraj, kraji):\n return kraji.get(kraj)\n\n\n<mask token>\nfor c in napis:\n vsota += vrednost.get(c, 0)\nprint(sum(vrednost.get(c, 0) for c in napis))\nfor c in napis:\n if c == 'I':\n vsota += 1\n elif c == 'V':\n vsota += 5\n elif c == 'X':\n vsota += 10\n elif c == 'L':\n vsota += 50\n elif c == 'C':\n vsota += 100\n elif c == 'D':\n vsota += 500\n elif c == 'M':\n vsota += 1000\n",
"step-4": "def koodrinate(kraj, kraji):\n for ime, x, y in kraji:\n if ime == kraj:\n return x, y\n\n\nkraji = {'Brežice': (68.66, 7.04), 'Lenart': (85.2, 78.75), 'Rateče': (-\n 65.04, 70.04), 'Ljutomer': (111.26, 71.82), 'Rogaška Slatina': (71.0, \n 42.0), 'Ribnica': (7.1, -10.5), 'Dutovlje': (-56.8, -6.93), 'Lokve': (-\n 57.94, 19.32), 'Vinica': (43.81, -38.43), 'Brtonigla': (-71.0, -47.25),\n 'Kanal': (-71.0, 26.25), 'Črnomelj': (39.05, -27.93), 'Trbovlje': (\n 29.61, 35.07), 'Beltinci': (114.81, 80.54), 'Domžale': (-2.34, 31.5)}\n\n\ndef koodrinate(kraj, kraji):\n return kraji.get(kraj)\n\n\nnapis = 'KRNEKI'\nvrednost = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\nvsota = 0\nfor c in napis:\n vsota += vrednost.get(c, 0)\nprint(sum(vrednost.get(c, 0) for c in napis))\nfor c in napis:\n if c == 'I':\n vsota += 1\n elif c == 'V':\n vsota += 5\n elif c == 'X':\n vsota += 10\n elif c == 'L':\n vsota += 50\n elif c == 'C':\n vsota += 100\n elif c == 'D':\n vsota += 500\n elif c == 'M':\n vsota += 1000\n",
"step-5": "def koodrinate(kraj, kraji):\n for ime, x, y in kraji:\n if ime == kraj:\n return x, y\n\n\nkraji = {\n 'Brežice': (68.66, 7.04),\n 'Lenart': (85.20, 78.75),\n 'Rateče': (-65.04, 70.04),\n 'Ljutomer': (111.26, 71.82),\n 'Rogaška Slatina': (71.00, 42.00),\n 'Ribnica': (7.10, -10.50),\n 'Dutovlje': (-56.80, -6.93),\n 'Lokve': (-57.94, 19.32),\n 'Vinica': (43.81, -38.43),\n 'Brtonigla': (-71.00, -47.25),\n 'Kanal': (-71.00, 26.25),\n 'Črnomelj': (39.05, -27.93),\n 'Trbovlje': (29.61, 35.07),\n 'Beltinci': (114.81, 80.54),\n 'Domžale': (-2.34, 31.50)\n}\n\n\ndef koodrinate(kraj, kraji):\n return kraji.get(kraj)\n\n\nnapis = \"KRNEKI\"\n\nvrednost = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\n\nvsota = 0\n\nfor c in napis:\n vsota += vrednost.get(c, 0)\n\nprint(sum(vrednost.get(c, 0) for c in napis))\n\nfor c in napis:\n if c == \"I\":\n vsota += 1\n elif c == \"V\":\n vsota += 5\n elif c == \"X\":\n vsota += 10\n elif c == \"L\":\n vsota += 50\n elif c == \"C\":\n vsota += 100\n elif c == \"D\":\n vsota += 500\n elif c == \"M\":\n vsota += 1000\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from abc import ABC, abstractmethod
from datetime import datetime, timedelta, date
import os
import housekeeper
import yfinance as yf
import pandas as pd
class DataManager(ABC):
def __init__(self):
self.__myHousekeeper = housekeeper.instance_class()
self.__config_filename = "tickers_config.json"
self.__dir_list = ['Data', 'Tickers', 'Dummy1']
self.__upper_stages = 0
self.__tickers_config_list = []
self.__tickers_list = []
self.__active_tickers_list = []
self.__selected_tickers_list = []
self.__timestamp = ''
self.__markets = []
self.__last_date_flag = False
def get_config_filename(self):
return self.__config_filename
def set_config_filename(self, config_filename):
self.__config_filename = config_filename
def get_dir_list(self):
return self.__dir_list
def set_dir_list(self, dir_list):
self.__dir_list = dir_list
def get_upper_stages(self):
return self.__upper_stages
def set_upper_stages(self, upper_stages):
        self.__upper_stages = upper_stages
def get_last_date_flag(self):
return self.__last_date_flag
def set_last_date_flag(self, last_date_flag):
self.__last_date_flag = last_date_flag
def get_tickers_config(self):
return self.__tickers_config_list
def set_tickers_config(self, tickers_config_list):
self.__tickers_config_list = tickers_config_list
def get_tickers(self):
return self.__tickers_list
def set_tickers(self, tickers_list):
self.__tickers_list = tickers_list
def get_active_tickers(self):
return self.__active_tickers_list
def set_active_tickers(self, active_tickers_list):
self.__active_tickers_list = active_tickers_list
def get_selected_tickers(self):
return self.__selected_tickers_list
def set_selected_tickers(self, selected_tickers_list):
self.__selected_tickers_list = selected_tickers_list
def get_timestamp(self):
return self.__timestamp
def set_timestamp(self, timestamp):
self.__timestamp = timestamp
def get_markets(self):
return self.__markets
def set_markets(self, markets):
self.__markets = markets
def load_tickers_config(self):
data = self.__myHousekeeper.load_json_to_list(self.__dir_list, self.__config_filename)
self.set_tickers_config(data)
def save_tickers_config(self):
        #Do not call this function without having loaded tickers_config first, or tickers_config gets overwritten
tickers_config = self.get_tickers_config()
self.__myHousekeeper.list_dict_to_json(self.get_dir_list(),
self.get_upper_stages(),
self.get_config_filename(),
self.get_tickers_config())
def initialize_metadata(self):
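        # Cache the config's timestamp metadata and the raw ticker list on the instance.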
self.load_tickers_config()
data = self.get_tickers_config()
self.set_timestamp(data['metadata'][0]['timestamp'])
self.set_tickers(data['data'])
def initialize_config_tickers(self):
# Get markets, get active_tickers
markets = []
active_tickers_ = []
self.initialize_metadata()
data = self.get_tickers()
for d in data:
markets.append(d['market'])
if d['active_type']=='stock' and d['active_flag']:
active_tickers_.append(d)
elif d['active_type']=='ETF':
active_tickers_.append(d)
self.set_active_tickers(active_tickers_)
self.set_markets(list(set(markets)))
def api_selected_tickers(self):
        #Reload tickers_config to get up-to-date info for the tickers.
        self.initialize_config_tickers()
        # The active tickers are displayed in the UI so the user can choose which tickers' data to update.
        ticker_list = self.get_tickers()
        self.set_selected_tickers(ticker_list[0:3])
#return self.get_active_tickers() #TODO
def update_timeseries_download_date(self, selected_tickers_to_update):
config_ticker_list = self.get_tickers_config()
today = date.today()
        # Dates are stored in %m-%d-%Y format
[t.update({'data_update':today.strftime("%m-%d-%Y")}) for t in config_ticker_list['data'] if t in selected_tickers_to_update]
self.set_tickers_config(config_ticker_list)
self.save_tickers_config()
def load_ticker_data(self, file_name):
return self.__myHousekeeper.csv_to_df(self.__dir_list,
file_name)
def save_ticker_data(self, file_name, data):
self.__myHousekeeper.df_to_csv(self.__dir_list,
self.__upper_stages, file_name, data)
class DataManager_YahooFinance(DataManager):
def __init__(self):
super().__init__()
def download_ticker_data_from_scratch(self, ticker, ticker_key):
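        # Full-history download: yfinance returns an OHLCV DataFrame indexed by
        # date, so the index is materialized into a 'Date' column before saving.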
print('Downloading from scratch historic data of: ' + ticker)
data_csv = yf.download(ticker)
data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv.index, errors='coerce'))
data_csv['Date'] = [time.date() for time in data_csv['Date']]
data_csv.reset_index(drop=True, inplace=True)
self.save_ticker_data(ticker_key,data_csv )
return data_csv
def download_ticker_data_from_last_date(self, ticker, ticker_key, start_date):
print('Updating historic data of: ' + ticker)
        # 1. Download data from the last date onward
        data_csv = yf.download(ticker, start = start_date)
        data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv.index, errors='coerce'))
        data_csv['Date'] = [time.date() for time in data_csv['Date']]
        print('Downloaded(sessions)', len(data_csv))
        # 2. Load the local csv
        data_csv_local = self.load_ticker_data(ticker_key)
        # 3. Append the missing rows, reset the index; this becomes the new data_csv
        data_csv = pd.concat([data_csv_local, data_csv], ignore_index = True)
        data_csv.reset_index(drop=True, inplace=True)
        data_csv.drop(data_csv.columns[0], axis = 1, inplace = True)
        # 4. Save the data, overwriting the previous file
self.save_ticker_data(ticker_key, data_csv)
#return data_csv
def last_date_download(self, ticker_dict):
# Local variables
last_date_str_ = ticker_dict['data_update']
ticker_key_ = ticker_dict['tickerKey']
ticker = ticker_dict['feeds']['ticker']
        # 3 cases: A) last_date is None -> from scratch, B) last >= today -> no download, C) start < today (else) -> download_ticker_data_from_last_date
        if last_date_str_ is None: # download_from_scratch goes here
            print(ticker + " is not found in database, adding ----")
            #data_csv = yf.download(ticker) # download_from_scratch goes here
self.download_ticker_data_from_scratch(ticker, ticker_key_)
return
now = datetime.now()
last_date = datetime.strptime(last_date_str_, '%m-%d-%Y')
delta = now - last_date
start_date = last_date + timedelta(days=+1)
        if delta.days <= 0: # no download here
            print('Data of ', ticker_key_ ,'is already updated')
            return
        else: # use download_ticker_data_from_last_date
self.download_ticker_data_from_last_date(ticker, ticker_key_, start_date)
delta = now - start_date
print('Downloaded(days): ', delta.days)
#return data_csv
def timeseries_download_manager(self, ticker_dict):
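        # Dispatch on the flag: incremental download from the last stored date,
        # or a full re-download from scratch (the default).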
if self.get_last_date_flag(): # From last date
print('Download ', ticker_dict['tickerKey'],' from last updated_date')
self.last_date_download(ticker_dict)
else: # From scratch
print('Download', ticker_dict['tickerKey'],' from scratch')
self.download_ticker_data_from_scratch(ticker_dict['feeds']['ticker'],ticker_dict['tickerKey'])
def download_selected_tickers(self):
        # Store the tickers that are going to be updated, and record the update date in ticker_config.
        # 1.- Store selected_tickers from user selection and a default option.
        #selected_tickers_list = self.api_active_tickers()
        self.api_selected_tickers()
        #2.- Set the download type: last_date(True) / from scratch(False, default)
        self.set_last_date_flag(False)
        #3.- Download the selected_tickers via timeseries_download_manager
        [self.timeseries_download_manager(t) for t in self.get_selected_tickers()]
        # 4.- Update data_update in tickers_config for the downloaded tickers
        self.update_timeseries_download_date(self.get_selected_tickers())
    def download_market_data(self, markets, _last_date_flag = False): #TODO: specify the subset in selected tickers so that the data_update date gets refreshed
        print('Download market ticker')
        #1.- Store in selected_tickers the tickers that belong to a market
        #Reload tickers_config to get up-to-date info for the tickers.
        self.initialize_config_tickers()
        # The active tickers are displayed in the UI so the user can choose which tickers' data to update.
        active_ticker_list = self.get_active_tickers()
        ticker_list = [t for t in active_ticker_list if t['market'] in markets]
        self.set_selected_tickers(ticker_list)
        #2.- Set the download type: last_date(True) / from scratch(False, default)
        self.set_last_date_flag(_last_date_flag)
        #3.- Download the selected_tickers via timeseries_download_manager
        #tickers = self.get_active_tickers()
        #[DM_YF.download_ticker_data_from_scratch(t['feeds']['ticker'], t['tickerKey']) for t in tickers if t['market'] in markets]
        [self.timeseries_download_manager(t) for t in self.get_selected_tickers()]
        # 4.- Update data_update in tickers_config for the downloaded tickers
        self.update_timeseries_download_date(self.get_selected_tickers())
def download_all_markets(self):
print('Download ALL MARKETS')
self.download_market_data(self.get_markets())
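
# Usage sketch (an assumption, not part of the original module; the DM_YF name
# follows the convention seen in the original source):
# DM_YF = DataManager_YahooFinance()
# DM_YF.download_selected_tickers()  # refresh a default selection
# DM_YF.download_all_markets()       # or refresh every market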
|
normal
|
{
"blob_id": "e77e0791ddf211807566528e9532eebb54db43b5",
"index": 5550,
"step-1": "<mask token>\n\n\nclass DataManager(ABC):\n\n def __init__(self):\n self.__myHousekeeper = housekeeper.instance_class()\n self.__config_filename = 'tickers_config.json'\n self.__dir_list = ['Data', 'Tickers', 'Dummy1']\n self.__upper_stages = 0\n self.__tickers_config_list = []\n self.__tickers_list = []\n self.__active_tickers_list = []\n self.__selected_tickers_list = []\n self.__timestamp = ''\n self.__markets = []\n self.__last_date_flag = False\n\n def get_config_filename(self):\n return self.__config_filename\n <mask token>\n\n def get_dir_list(self):\n return self.__dir_list\n <mask token>\n\n def get_upper_stages(self):\n return self.__upper_stages\n\n def set_upper_stages(self, upper_stages):\n self.__upper_stages = dir_list\n\n def get_last_date_flag(self):\n return self.__last_date_flag\n\n def set_last_date_flag(self, last_date_flag):\n self.__last_date_flag = last_date_flag\n\n def get_tickers_config(self):\n return self.__tickers_config_list\n\n def set_tickers_config(self, tickers_config_list):\n self.__tickers_config_list = tickers_config_list\n\n def get_tickers(self):\n return self.__tickers_list\n <mask token>\n\n def get_active_tickers(self):\n return self.__active_tickers_list\n <mask token>\n\n def get_selected_tickers(self):\n return self.__selected_tickers_list\n\n def set_selected_tickers(self, selected_tickers_list):\n self.__selected_tickers_list = selected_tickers_list\n\n def get_timestamp(self):\n return self.__timestamp\n <mask token>\n\n def get_markets(self):\n return self.__markets\n <mask token>\n <mask token>\n\n def save_tickers_config(self):\n tickers_config = self.get_tickers_config()\n self.__myHousekeeper.list_dict_to_json(self.get_dir_list(), self.\n get_upper_stages(), self.get_config_filename(), self.\n get_tickers_config())\n\n def initialize_metadata(self):\n self.load_tickers_config()\n data = self.get_tickers_config()\n self.set_timestamp(data['metadata'][0]['timestamp'])\n self.set_tickers(data['data'])\n\n def initialize_config_tickers(self):\n markets = []\n active_tickers_ = []\n self.initialize_metadata()\n data = self.get_tickers()\n for d in data:\n markets.append(d['market'])\n if d['active_type'] == 'stock' and d['active_flag']:\n active_tickers_.append(d)\n elif d['active_type'] == 'ETF':\n active_tickers_.append(d)\n self.set_active_tickers(active_tickers_)\n self.set_markets(list(set(markets)))\n <mask token>\n <mask token>\n\n def load_ticker_data(self, file_name):\n return self.__myHousekeeper.csv_to_df(self.__dir_list, file_name)\n\n def save_ticker_data(self, file_name, data):\n self.__myHousekeeper.df_to_csv(self.__dir_list, self.__upper_stages,\n file_name, data)\n\n\nclass DataManager_YahooFinance(DataManager):\n\n def __init__(self):\n super().__init__()\n\n def download_ticker_data_from_scratch(self, ticker, ticker_key):\n print('Downloading from scratch historic data of: ' + ticker)\n data_csv = yf.download(ticker)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n data_csv.reset_index(drop=True, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n return data_csv\n\n def download_ticker_data_from_last_date(self, ticker, ticker_key,\n start_date):\n print('Updating historic data of: ' + ticker)\n data_csv = yf.download(ticker, start=start_date)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in 
data_csv['Date']]\n print('Downloaded(sessions)', len(data_csv))\n data_csv_local = DM_YF.load_ticker_data(ticker_key)\n data_csv = pd.concat([data_csv_local, data_csv], ignore_index=True)\n data_csv.reset_index(drop=True, inplace=True)\n data_csv.drop(data_csv.columns[0], axis=1, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n\n def last_date_download(self, ticker_dict):\n last_date_str_ = ticker_dict['data_update']\n ticker_key_ = ticker_dict['tickerKey']\n ticker = ticker_dict['feeds']['ticker']\n if last_date_str_ is None:\n print(ticker + ' is not found in database, adding ----')\n self.download_ticker_data_from_scratch(ticker, ticker_key_)\n return\n now = datetime.now()\n last_date = datetime.strptime(last_date_str_, '%m-%d-%Y')\n delta = now - last_date\n start_date = last_date + timedelta(days=+1)\n if delta.days <= 0:\n print('Data of ', ticker_key_, 'is already updated')\n return\n else:\n self.download_ticker_data_from_last_date(ticker, ticker_key_,\n start_date)\n delta = now - start_date\n print('Downloaded(days): ', delta.days)\n\n def timeseries_download_manager(self, ticker_dict):\n if self.get_last_date_flag():\n print('Download ', ticker_dict['tickerKey'],\n ' from last updated_date')\n self.last_date_download(ticker_dict)\n else:\n print('Download', ticker_dict['tickerKey'], ' from scratch')\n self.download_ticker_data_from_scratch(ticker_dict['feeds'][\n 'ticker'], ticker_dict['tickerKey'])\n\n def download_selected_tickers(self):\n self.api_selected_tickers()\n self.set_last_date_flag(False)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_market_data(self, markets, _last_date_flag=False):\n print('Download market ticker')\n self.initialize_config_tickers()\n active_ticker_list = self.get_active_tickers()\n ticker_list = [t for t in active_ticker_list if t['market'] in markets]\n self.set_selected_tickers(ticker_list)\n self.set_last_date_flag(_last_date_flag)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_all_markets(self):\n print('Download ALL MARKETS')\n self.download_market_data(self.get_markets())\n",
"step-2": "<mask token>\n\n\nclass DataManager(ABC):\n\n def __init__(self):\n self.__myHousekeeper = housekeeper.instance_class()\n self.__config_filename = 'tickers_config.json'\n self.__dir_list = ['Data', 'Tickers', 'Dummy1']\n self.__upper_stages = 0\n self.__tickers_config_list = []\n self.__tickers_list = []\n self.__active_tickers_list = []\n self.__selected_tickers_list = []\n self.__timestamp = ''\n self.__markets = []\n self.__last_date_flag = False\n\n def get_config_filename(self):\n return self.__config_filename\n <mask token>\n\n def get_dir_list(self):\n return self.__dir_list\n <mask token>\n\n def get_upper_stages(self):\n return self.__upper_stages\n\n def set_upper_stages(self, upper_stages):\n self.__upper_stages = dir_list\n\n def get_last_date_flag(self):\n return self.__last_date_flag\n\n def set_last_date_flag(self, last_date_flag):\n self.__last_date_flag = last_date_flag\n\n def get_tickers_config(self):\n return self.__tickers_config_list\n\n def set_tickers_config(self, tickers_config_list):\n self.__tickers_config_list = tickers_config_list\n\n def get_tickers(self):\n return self.__tickers_list\n <mask token>\n\n def get_active_tickers(self):\n return self.__active_tickers_list\n\n def set_active_tickers(self, active_tickers_list):\n self.__active_tickers_list = active_tickers_list\n\n def get_selected_tickers(self):\n return self.__selected_tickers_list\n\n def set_selected_tickers(self, selected_tickers_list):\n self.__selected_tickers_list = selected_tickers_list\n\n def get_timestamp(self):\n return self.__timestamp\n\n def set_timestamp(self, timestamp):\n self.__timestamp = timestamp\n\n def get_markets(self):\n return self.__markets\n <mask token>\n\n def load_tickers_config(self):\n data = self.__myHousekeeper.load_json_to_list(self.__dir_list, self\n .__config_filename)\n self.set_tickers_config(data)\n\n def save_tickers_config(self):\n tickers_config = self.get_tickers_config()\n self.__myHousekeeper.list_dict_to_json(self.get_dir_list(), self.\n get_upper_stages(), self.get_config_filename(), self.\n get_tickers_config())\n\n def initialize_metadata(self):\n self.load_tickers_config()\n data = self.get_tickers_config()\n self.set_timestamp(data['metadata'][0]['timestamp'])\n self.set_tickers(data['data'])\n\n def initialize_config_tickers(self):\n markets = []\n active_tickers_ = []\n self.initialize_metadata()\n data = self.get_tickers()\n for d in data:\n markets.append(d['market'])\n if d['active_type'] == 'stock' and d['active_flag']:\n active_tickers_.append(d)\n elif d['active_type'] == 'ETF':\n active_tickers_.append(d)\n self.set_active_tickers(active_tickers_)\n self.set_markets(list(set(markets)))\n\n def api_selected_tickers(self):\n self.initialize_config_tickers()\n ticker_list = self.get_tickers()\n self.set_selected_tickers(ticker_list[0:3])\n <mask token>\n\n def load_ticker_data(self, file_name):\n return self.__myHousekeeper.csv_to_df(self.__dir_list, file_name)\n\n def save_ticker_data(self, file_name, data):\n self.__myHousekeeper.df_to_csv(self.__dir_list, self.__upper_stages,\n file_name, data)\n\n\nclass DataManager_YahooFinance(DataManager):\n\n def __init__(self):\n super().__init__()\n\n def download_ticker_data_from_scratch(self, ticker, ticker_key):\n print('Downloading from scratch historic data of: ' + ticker)\n data_csv = yf.download(ticker)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n 
data_csv.reset_index(drop=True, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n return data_csv\n\n def download_ticker_data_from_last_date(self, ticker, ticker_key,\n start_date):\n print('Updating historic data of: ' + ticker)\n data_csv = yf.download(ticker, start=start_date)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n print('Downloaded(sessions)', len(data_csv))\n data_csv_local = DM_YF.load_ticker_data(ticker_key)\n data_csv = pd.concat([data_csv_local, data_csv], ignore_index=True)\n data_csv.reset_index(drop=True, inplace=True)\n data_csv.drop(data_csv.columns[0], axis=1, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n\n def last_date_download(self, ticker_dict):\n last_date_str_ = ticker_dict['data_update']\n ticker_key_ = ticker_dict['tickerKey']\n ticker = ticker_dict['feeds']['ticker']\n if last_date_str_ is None:\n print(ticker + ' is not found in database, adding ----')\n self.download_ticker_data_from_scratch(ticker, ticker_key_)\n return\n now = datetime.now()\n last_date = datetime.strptime(last_date_str_, '%m-%d-%Y')\n delta = now - last_date\n start_date = last_date + timedelta(days=+1)\n if delta.days <= 0:\n print('Data of ', ticker_key_, 'is already updated')\n return\n else:\n self.download_ticker_data_from_last_date(ticker, ticker_key_,\n start_date)\n delta = now - start_date\n print('Downloaded(days): ', delta.days)\n\n def timeseries_download_manager(self, ticker_dict):\n if self.get_last_date_flag():\n print('Download ', ticker_dict['tickerKey'],\n ' from last updated_date')\n self.last_date_download(ticker_dict)\n else:\n print('Download', ticker_dict['tickerKey'], ' from scratch')\n self.download_ticker_data_from_scratch(ticker_dict['feeds'][\n 'ticker'], ticker_dict['tickerKey'])\n\n def download_selected_tickers(self):\n self.api_selected_tickers()\n self.set_last_date_flag(False)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_market_data(self, markets, _last_date_flag=False):\n print('Download market ticker')\n self.initialize_config_tickers()\n active_ticker_list = self.get_active_tickers()\n ticker_list = [t for t in active_ticker_list if t['market'] in markets]\n self.set_selected_tickers(ticker_list)\n self.set_last_date_flag(_last_date_flag)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_all_markets(self):\n print('Download ALL MARKETS')\n self.download_market_data(self.get_markets())\n",
"step-3": "<mask token>\n\n\nclass DataManager(ABC):\n\n def __init__(self):\n self.__myHousekeeper = housekeeper.instance_class()\n self.__config_filename = 'tickers_config.json'\n self.__dir_list = ['Data', 'Tickers', 'Dummy1']\n self.__upper_stages = 0\n self.__tickers_config_list = []\n self.__tickers_list = []\n self.__active_tickers_list = []\n self.__selected_tickers_list = []\n self.__timestamp = ''\n self.__markets = []\n self.__last_date_flag = False\n\n def get_config_filename(self):\n return self.__config_filename\n <mask token>\n\n def get_dir_list(self):\n return self.__dir_list\n\n def set_dir_list(self, dir_list):\n self.__dir_list = dir_list\n\n def get_upper_stages(self):\n return self.__upper_stages\n\n def set_upper_stages(self, upper_stages):\n self.__upper_stages = dir_list\n\n def get_last_date_flag(self):\n return self.__last_date_flag\n\n def set_last_date_flag(self, last_date_flag):\n self.__last_date_flag = last_date_flag\n\n def get_tickers_config(self):\n return self.__tickers_config_list\n\n def set_tickers_config(self, tickers_config_list):\n self.__tickers_config_list = tickers_config_list\n\n def get_tickers(self):\n return self.__tickers_list\n <mask token>\n\n def get_active_tickers(self):\n return self.__active_tickers_list\n\n def set_active_tickers(self, active_tickers_list):\n self.__active_tickers_list = active_tickers_list\n\n def get_selected_tickers(self):\n return self.__selected_tickers_list\n\n def set_selected_tickers(self, selected_tickers_list):\n self.__selected_tickers_list = selected_tickers_list\n\n def get_timestamp(self):\n return self.__timestamp\n\n def set_timestamp(self, timestamp):\n self.__timestamp = timestamp\n\n def get_markets(self):\n return self.__markets\n <mask token>\n\n def load_tickers_config(self):\n data = self.__myHousekeeper.load_json_to_list(self.__dir_list, self\n .__config_filename)\n self.set_tickers_config(data)\n\n def save_tickers_config(self):\n tickers_config = self.get_tickers_config()\n self.__myHousekeeper.list_dict_to_json(self.get_dir_list(), self.\n get_upper_stages(), self.get_config_filename(), self.\n get_tickers_config())\n\n def initialize_metadata(self):\n self.load_tickers_config()\n data = self.get_tickers_config()\n self.set_timestamp(data['metadata'][0]['timestamp'])\n self.set_tickers(data['data'])\n\n def initialize_config_tickers(self):\n markets = []\n active_tickers_ = []\n self.initialize_metadata()\n data = self.get_tickers()\n for d in data:\n markets.append(d['market'])\n if d['active_type'] == 'stock' and d['active_flag']:\n active_tickers_.append(d)\n elif d['active_type'] == 'ETF':\n active_tickers_.append(d)\n self.set_active_tickers(active_tickers_)\n self.set_markets(list(set(markets)))\n\n def api_selected_tickers(self):\n self.initialize_config_tickers()\n ticker_list = self.get_tickers()\n self.set_selected_tickers(ticker_list[0:3])\n <mask token>\n\n def load_ticker_data(self, file_name):\n return self.__myHousekeeper.csv_to_df(self.__dir_list, file_name)\n\n def save_ticker_data(self, file_name, data):\n self.__myHousekeeper.df_to_csv(self.__dir_list, self.__upper_stages,\n file_name, data)\n\n\nclass DataManager_YahooFinance(DataManager):\n\n def __init__(self):\n super().__init__()\n\n def download_ticker_data_from_scratch(self, ticker, ticker_key):\n print('Downloading from scratch historic data of: ' + ticker)\n data_csv = yf.download(ticker)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = 
[time.date() for time in data_csv['Date']]\n data_csv.reset_index(drop=True, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n return data_csv\n\n def download_ticker_data_from_last_date(self, ticker, ticker_key,\n start_date):\n print('Updating historic data of: ' + ticker)\n data_csv = yf.download(ticker, start=start_date)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n print('Downloaded(sessions)', len(data_csv))\n data_csv_local = DM_YF.load_ticker_data(ticker_key)\n data_csv = pd.concat([data_csv_local, data_csv], ignore_index=True)\n data_csv.reset_index(drop=True, inplace=True)\n data_csv.drop(data_csv.columns[0], axis=1, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n\n def last_date_download(self, ticker_dict):\n last_date_str_ = ticker_dict['data_update']\n ticker_key_ = ticker_dict['tickerKey']\n ticker = ticker_dict['feeds']['ticker']\n if last_date_str_ is None:\n print(ticker + ' is not found in database, adding ----')\n self.download_ticker_data_from_scratch(ticker, ticker_key_)\n return\n now = datetime.now()\n last_date = datetime.strptime(last_date_str_, '%m-%d-%Y')\n delta = now - last_date\n start_date = last_date + timedelta(days=+1)\n if delta.days <= 0:\n print('Data of ', ticker_key_, 'is already updated')\n return\n else:\n self.download_ticker_data_from_last_date(ticker, ticker_key_,\n start_date)\n delta = now - start_date\n print('Downloaded(days): ', delta.days)\n\n def timeseries_download_manager(self, ticker_dict):\n if self.get_last_date_flag():\n print('Download ', ticker_dict['tickerKey'],\n ' from last updated_date')\n self.last_date_download(ticker_dict)\n else:\n print('Download', ticker_dict['tickerKey'], ' from scratch')\n self.download_ticker_data_from_scratch(ticker_dict['feeds'][\n 'ticker'], ticker_dict['tickerKey'])\n\n def download_selected_tickers(self):\n self.api_selected_tickers()\n self.set_last_date_flag(False)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_market_data(self, markets, _last_date_flag=False):\n print('Download market ticker')\n self.initialize_config_tickers()\n active_ticker_list = self.get_active_tickers()\n ticker_list = [t for t in active_ticker_list if t['market'] in markets]\n self.set_selected_tickers(ticker_list)\n self.set_last_date_flag(_last_date_flag)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_all_markets(self):\n print('Download ALL MARKETS')\n self.download_market_data(self.get_markets())\n",
"step-4": "from abc import ABC, abstractmethod\nfrom datetime import datetime, timedelta, date\nimport os\nimport housekeeper\nimport yfinance as yf\nimport pandas as pd\n\n\nclass DataManager(ABC):\n\n def __init__(self):\n self.__myHousekeeper = housekeeper.instance_class()\n self.__config_filename = 'tickers_config.json'\n self.__dir_list = ['Data', 'Tickers', 'Dummy1']\n self.__upper_stages = 0\n self.__tickers_config_list = []\n self.__tickers_list = []\n self.__active_tickers_list = []\n self.__selected_tickers_list = []\n self.__timestamp = ''\n self.__markets = []\n self.__last_date_flag = False\n\n def get_config_filename(self):\n return self.__config_filename\n\n def set_config_filename(self, config_filename):\n self.__config_filename = config_filename\n\n def get_dir_list(self):\n return self.__dir_list\n\n def set_dir_list(self, dir_list):\n self.__dir_list = dir_list\n\n def get_upper_stages(self):\n return self.__upper_stages\n\n def set_upper_stages(self, upper_stages):\n self.__upper_stages = dir_list\n\n def get_last_date_flag(self):\n return self.__last_date_flag\n\n def set_last_date_flag(self, last_date_flag):\n self.__last_date_flag = last_date_flag\n\n def get_tickers_config(self):\n return self.__tickers_config_list\n\n def set_tickers_config(self, tickers_config_list):\n self.__tickers_config_list = tickers_config_list\n\n def get_tickers(self):\n return self.__tickers_list\n\n def set_tickers(self, tickers_list):\n self.__tickers_list = tickers_list\n\n def get_active_tickers(self):\n return self.__active_tickers_list\n\n def set_active_tickers(self, active_tickers_list):\n self.__active_tickers_list = active_tickers_list\n\n def get_selected_tickers(self):\n return self.__selected_tickers_list\n\n def set_selected_tickers(self, selected_tickers_list):\n self.__selected_tickers_list = selected_tickers_list\n\n def get_timestamp(self):\n return self.__timestamp\n\n def set_timestamp(self, timestamp):\n self.__timestamp = timestamp\n\n def get_markets(self):\n return self.__markets\n\n def set_markets(self, markets):\n self.__markets = markets\n\n def load_tickers_config(self):\n data = self.__myHousekeeper.load_json_to_list(self.__dir_list, self\n .__config_filename)\n self.set_tickers_config(data)\n\n def save_tickers_config(self):\n tickers_config = self.get_tickers_config()\n self.__myHousekeeper.list_dict_to_json(self.get_dir_list(), self.\n get_upper_stages(), self.get_config_filename(), self.\n get_tickers_config())\n\n def initialize_metadata(self):\n self.load_tickers_config()\n data = self.get_tickers_config()\n self.set_timestamp(data['metadata'][0]['timestamp'])\n self.set_tickers(data['data'])\n\n def initialize_config_tickers(self):\n markets = []\n active_tickers_ = []\n self.initialize_metadata()\n data = self.get_tickers()\n for d in data:\n markets.append(d['market'])\n if d['active_type'] == 'stock' and d['active_flag']:\n active_tickers_.append(d)\n elif d['active_type'] == 'ETF':\n active_tickers_.append(d)\n self.set_active_tickers(active_tickers_)\n self.set_markets(list(set(markets)))\n\n def api_selected_tickers(self):\n self.initialize_config_tickers()\n ticker_list = self.get_tickers()\n self.set_selected_tickers(ticker_list[0:3])\n\n def update_timeseries_download_date(self, selected_tickers_to_update):\n config_ticker_list = self.get_tickers_config()\n today = date.today()\n [t.update({'data_update': today.strftime('%m-%d-%Y')}) for t in\n config_ticker_list['data'] if t in selected_tickers_to_update]\n 
self.set_tickers_config(config_ticker_list)\n self.save_tickers_config()\n\n def load_ticker_data(self, file_name):\n return self.__myHousekeeper.csv_to_df(self.__dir_list, file_name)\n\n def save_ticker_data(self, file_name, data):\n self.__myHousekeeper.df_to_csv(self.__dir_list, self.__upper_stages,\n file_name, data)\n\n\nclass DataManager_YahooFinance(DataManager):\n\n def __init__(self):\n super().__init__()\n\n def download_ticker_data_from_scratch(self, ticker, ticker_key):\n print('Downloading from scratch historic data of: ' + ticker)\n data_csv = yf.download(ticker)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n data_csv.reset_index(drop=True, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n return data_csv\n\n def download_ticker_data_from_last_date(self, ticker, ticker_key,\n start_date):\n print('Updating historic data of: ' + ticker)\n data_csv = yf.download(ticker, start=start_date)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv\n .index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n print('Downloaded(sessions)', len(data_csv))\n data_csv_local = DM_YF.load_ticker_data(ticker_key)\n data_csv = pd.concat([data_csv_local, data_csv], ignore_index=True)\n data_csv.reset_index(drop=True, inplace=True)\n data_csv.drop(data_csv.columns[0], axis=1, inplace=True)\n self.save_ticker_data(ticker_key, data_csv)\n\n def last_date_download(self, ticker_dict):\n last_date_str_ = ticker_dict['data_update']\n ticker_key_ = ticker_dict['tickerKey']\n ticker = ticker_dict['feeds']['ticker']\n if last_date_str_ is None:\n print(ticker + ' is not found in database, adding ----')\n self.download_ticker_data_from_scratch(ticker, ticker_key_)\n return\n now = datetime.now()\n last_date = datetime.strptime(last_date_str_, '%m-%d-%Y')\n delta = now - last_date\n start_date = last_date + timedelta(days=+1)\n if delta.days <= 0:\n print('Data of ', ticker_key_, 'is already updated')\n return\n else:\n self.download_ticker_data_from_last_date(ticker, ticker_key_,\n start_date)\n delta = now - start_date\n print('Downloaded(days): ', delta.days)\n\n def timeseries_download_manager(self, ticker_dict):\n if self.get_last_date_flag():\n print('Download ', ticker_dict['tickerKey'],\n ' from last updated_date')\n self.last_date_download(ticker_dict)\n else:\n print('Download', ticker_dict['tickerKey'], ' from scratch')\n self.download_ticker_data_from_scratch(ticker_dict['feeds'][\n 'ticker'], ticker_dict['tickerKey'])\n\n def download_selected_tickers(self):\n self.api_selected_tickers()\n self.set_last_date_flag(False)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_market_data(self, markets, _last_date_flag=False):\n print('Download market ticker')\n self.initialize_config_tickers()\n active_ticker_list = self.get_active_tickers()\n ticker_list = [t for t in active_ticker_list if t['market'] in markets]\n self.set_selected_tickers(ticker_list)\n self.set_last_date_flag(_last_date_flag)\n [self.timeseries_download_manager(t) for t in self.\n get_selected_tickers()]\n self.update_timeseries_download_date(self.get_selected_tickers())\n\n def download_all_markets(self):\n print('Download ALL MARKETS')\n self.download_market_data(self.get_markets())\n",
"step-5": "from abc import ABC, abstractmethod\nfrom datetime import datetime, timedelta, date\nimport os\n\nimport housekeeper\n\nimport yfinance as yf\nimport pandas as pd\n\nclass DataManager(ABC):\n \n def __init__(self):\n \n self.__myHousekeeper = housekeeper.instance_class()\n self.__config_filename = \"tickers_config.json\"\n self.__dir_list = ['Data', 'Tickers', 'Dummy1']\n self.__upper_stages = 0\n self.__tickers_config_list = []\n self.__tickers_list = []\n self.__active_tickers_list = []\n self.__selected_tickers_list = []\n self.__timestamp = ''\n self.__markets = []\n self.__last_date_flag = False\n \n\n def get_config_filename(self):\n return self.__config_filename\n \n def set_config_filename(self, config_filename):\n self.__config_filename = config_filename\n \n def get_dir_list(self):\n return self.__dir_list\n \n def set_dir_list(self, dir_list):\n self.__dir_list = dir_list\n \n def get_upper_stages(self):\n return self.__upper_stages\n \n def set_upper_stages(self, upper_stages):\n self.__upper_stages = dir_list\n \n def get_last_date_flag(self):\n return self.__last_date_flag\n \n def set_last_date_flag(self, last_date_flag):\n self.__last_date_flag = last_date_flag\n \n def get_tickers_config(self):\n return self.__tickers_config_list\n \n def set_tickers_config(self, tickers_config_list):\n self.__tickers_config_list = tickers_config_list\n \n def get_tickers(self):\n return self.__tickers_list\n \n def set_tickers(self, tickers_list):\n self.__tickers_list = tickers_list\n \n def get_active_tickers(self):\n return self.__active_tickers_list\n \n def set_active_tickers(self, active_tickers_list):\n self.__active_tickers_list = active_tickers_list\n \n def get_selected_tickers(self):\n return self.__selected_tickers_list\n \n def set_selected_tickers(self, selected_tickers_list):\n self.__selected_tickers_list = selected_tickers_list\n \n def get_timestamp(self):\n return self.__timestamp\n \n def set_timestamp(self, timestamp):\n self.__timestamp = timestamp\n \n def get_markets(self):\n return self.__markets\n \n def set_markets(self, markets):\n self.__markets = markets\n \n def load_tickers_config(self):\n data = self.__myHousekeeper.load_json_to_list(self.__dir_list, self.__config_filename)\n self.set_tickers_config(data)\n \n def save_tickers_config(self):\n #No invocar a esta función sin previamente haber cargado tickers_config. 
O se sobreescribe tickers_config\n tickers_config = self.get_tickers_config()\n self.__myHousekeeper.list_dict_to_json(self.get_dir_list(), \n self.get_upper_stages(), \n self.get_config_filename(), \n self.get_tickers_config())\n \n def initialize_metadata(self):\n self.load_tickers_config()\n data = self.get_tickers_config()\n self.set_timestamp(data['metadata'][0]['timestamp'])\n self.set_tickers(data['data'])\n \n def initialize_config_tickers(self):\n # Get markets, get active_tickers\n markets = []\n active_tickers_ = []\n self.initialize_metadata()\n data = self.get_tickers()\n for d in data:\n markets.append(d['market'])\n if d['active_type']=='stock' and d['active_flag']:\n active_tickers_.append(d)\n elif d['active_type']=='ETF':\n active_tickers_.append(d)\n self.set_active_tickers(active_tickers_)\n self.set_markets(list(set(markets)))\n \n def api_selected_tickers(self):\n #Se recarga el tickers_config para info actualizada de los tickers.\n self.initialize_config_tickers()\n # Se despliegan los tickers activos en la UI para que el usuario elija qué tickers quiere actualizar el data.\n ticker_list = self.get_tickers()\n self.set_selected_tickers(ticker_list[0:3])\n \n #return self.get_active_tickers() #TODO\n \n def update_timeseries_download_date(self, selected_tickers_to_update):\n config_ticker_list = self.get_tickers_config()\n today = date.today()\n # LAs fechas se guardan en formato %m-%d-%Y\n [t.update({'data_update':today.strftime(\"%m-%d-%Y\")}) for t in config_ticker_list['data'] if t in selected_tickers_to_update]\n self.set_tickers_config(config_ticker_list)\n self.save_tickers_config()\n \n def load_ticker_data(self, file_name):\n return self.__myHousekeeper.csv_to_df(self.__dir_list,\n file_name)\n \n def save_ticker_data(self, file_name, data):\n self.__myHousekeeper.df_to_csv(self.__dir_list,\n self.__upper_stages, file_name, data)\n \n\nclass DataManager_YahooFinance(DataManager):\n \n def __init__(self):\n super().__init__()\n \n \n def download_ticker_data_from_scratch(self, ticker, ticker_key):\n print('Downloading from scratch historic data of: ' + ticker)\n data_csv = yf.download(ticker)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv.index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n data_csv.reset_index(drop=True, inplace=True)\n self.save_ticker_data(ticker_key,data_csv )\n return data_csv\n \n def download_ticker_data_from_last_date(self, ticker, ticker_key, start_date):\n print('Updating historic data of: ' + ticker)\n # 1. Descargar datos desde la ultima fecha\n data_csv = yf.download(ticker, start = start_date)\n data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv.index, errors='coerce'))\n data_csv['Date'] = [time.date() for time in data_csv['Date']]\n print('Downloaded(sessions)', len(data_csv))\n # 2. Cargar el csv\n data_csv_local = DM_YF.load_ticker_data(ticker_key)\n # 3. Apendear los datos que faltan, resetear el index y esta será la nueva varaible data_csv\n data_csv = pd.concat([data_csv_local, data_csv], ignore_index = True)\n data_csv.reset_index(drop=True, inplace=True)\n data_csv.drop(data_csv.columns[0], axis = 1, inplace = True)\n # 4. 
Guardar los datos sobreescribiendo el archivo anterior\n self.save_ticker_data(ticker_key, data_csv)\n #return data_csv\n \n def last_date_download(self, ticker_dict):\n # Local variables\n last_date_str_ = ticker_dict['data_update']\n ticker_key_ = ticker_dict['tickerKey']\n ticker = ticker_dict['feeds']['ticker']\n # 3 casos: A) last_date is None -> from scratch, B) last >= today -> no hay descarga C) start < today (else) -> download_ticker_data_from_last_date\n if last_date_str_ is None: # Aquí va un download_from_scratch\n print(ticker + \" is not found in database, adding ----\")\n #data_csv = yf.download(ticker) # Aquí va un download_from_scratch\n self.download_ticker_data_from_scratch(ticker, ticker_key_)\n return\n now = datetime.now()\n last_date = datetime.strptime(last_date_str_, '%m-%d-%Y')\n delta = now - last_date\n start_date = last_date + timedelta(days=+1)\n if delta.days <= 0: # Aquí no hay download\n print('Data of ', ticker_key_ ,'is already updated')\n return\n else: # Función download_ticker_data_from_last_date\n self.download_ticker_data_from_last_date(ticker, ticker_key_, start_date)\n delta = now - start_date\n print('Downloaded(days): ', delta.days)\n #return data_csv\n \n \n def timeseries_download_manager(self, ticker_dict):\n if self.get_last_date_flag(): # From last date\n print('Download ', ticker_dict['tickerKey'],' from last updated_date')\n self.last_date_download(ticker_dict)\n else: # From scratch\n print('Download', ticker_dict['tickerKey'],' from scratch')\n self.download_ticker_data_from_scratch(ticker_dict['feeds']['ticker'],ticker_dict['tickerKey'])\n \n \n def download_selected_tickers(self):\n # Se almacenan los tickers que van a se actualizados y se guarda la fecha de actualización en el ticker_config. \n # 1.- Almacenar selected_Tickers from user selection and a default option.\n #selected_tickers_list = self.api_active_tickers()\n self.api_selected_tickers()\n #2.- Establecer el tipo de descarga: last_date(True) / from scratch(False, default) \n self.set_last_date_flag(False)\n #3.- Descargar los selected_tickers. Enganchar con timeseries_download_manager\n [self.timeseries_download_manager(t) for t in self.get_selected_tickers()]\n # 4.- Actualizar data_update en tickers_config de los tickers descargados\n self.update_timeseries_download_date(self.get_selected_tickers())\n \n \n def download_market_data(self, markets, _last_date_flag = False): #TODO: especificar el subconjunto en selected tickers. Para que se actualice la fecha data_update\n print('Download market ticker')\n #1.- Almacenar en selected_ticker los tickers correspondientes a un market\n #Se recarga el tickers_config para info actualizada de los tickers.\n self.initialize_config_tickers()\n # Se despliegan los tickers activos en la UI para que el usuario elija qué tickers quiere actualizar el data.\n active_ticker_list = self.get_active_tickers()\n ticker_list = [t for t in active_ticker_list if t['market'] in markets]\n self.set_selected_tickers(ticker_list)\n #2.- Establecer el tipo de descarga: last_date(True) / from scratch(False, default) \n self.set_last_date_flag(_last_date_flag)\n #3.- Descargar los selected_tickers. 
Enganchar con timeseries_download_manager\n #tickers = self.get_active_tickers()\n #[DM_YF.download_ticker_data_from_scratch(t['feeds']['ticker'], t['tickerKey']) for t in tickers if t['market'] in markets]\n [self.timeseries_download_manager(t) for t in self.get_selected_tickers()]\n # 4.- Actualizar data_update en tickers_config de los tickers descargados\n self.update_timeseries_download_date(self.get_selected_tickers())\n \n def download_all_markets(self):\n print('Download ALL MARKETS')\n self.download_market_data(self.get_markets())\n ",
"step-ids": [
30,
34,
35,
40,
41
]
}
|
[
30,
34,
35,
40,
41
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SceneName = 'sphere'
DefaultColor = QtCore.Qt.yellow
<|reserved_special_token_1|>
from PyQt4 import QtCore
SceneName = 'sphere'
DefaultColor = QtCore.Qt.yellow
<|reserved_special_token_1|>
from PyQt4 import QtCore
SceneName = "sphere"
DefaultColor = QtCore.Qt.yellow
|
flexible
|
{
"blob_id": "874b87ca20385aa15cc7299707c9c1c0360ace43",
"index": 1045,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSceneName = 'sphere'\nDefaultColor = QtCore.Qt.yellow\n",
"step-3": "from PyQt4 import QtCore\nSceneName = 'sphere'\nDefaultColor = QtCore.Qt.yellow\n",
"step-4": "from PyQt4 import QtCore\r\n\r\n\r\nSceneName = \"sphere\"\r\nDefaultColor = QtCore.Qt.yellow\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#Correlation analysis
"""
Limitation of the Euclidean distance formula: if a particular person's scores are extremely high or low, it is hard to derive proper results.
=> Correlation analysis: analyze the linear relationship between two variables
"""
#Ratings for BTS and 유성룡, plus 이황 and 조용필
import matplotlib as mpl
mpl.rcParams['axes.unicode_minus']=False #prevent broken glyphs when using a Korean font
from matplotlib import font_manager, rc
import matplotlib.pyplot as plt
from math import sqrt
font_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf').get_name()
rc('font',family=font_name)
critics = {
'조용필': {
'택시운전사': 2.5,
'겨울왕국': 3.5,
'리빙라스베가스': 3.0,
'넘버3': 3.5,
'사랑과전쟁': 2.5,
'세계대전': 3.0,
},
'BTS': {
'택시운전사': 1.0,
'겨울왕국': 4.5,
'리빙라스베가스': 0.5,
'넘버3': 1.5,
'사랑과전쟁': 4.5,
'세계대전': 5.0,
},
'강감찬': {
'택시운전사': 3.0,
'겨울왕국': 3.5,
'리빙라스베가스': 1.5,
'넘버3': 5.0,
'세계대전': 3.0,
'사랑과전쟁': 3.5,
},
'을지문덕': {
'택시운전사': 2.5,
'겨울왕국': 3.0,
'넘버3': 3.5,
'세계대전': 4.0,
},
'김유신': {
'겨울왕국': 3.5,
'리빙라스베가스': 3.0,
'세계대전': 4.5,
'넘버3': 4.0,
'사랑과전쟁': 2.5,
},
'유성룡': {
'택시운전사': 3.0,
'겨울왕국': 4.0,
'리빙라스베가스': 2.0,
'넘버3': 3.0,
'세계대전': 3.5,
'사랑과전쟁': 2.0,
},
'이황': {
'택시운전사': 3.0,
'겨울왕국': 4.0,
'세계대전': 3.0,
'넘버3': 5.0,
'사랑과전쟁': 3.5,
},
'이이': {'겨울왕국': 4.5, '사랑과전쟁': 1.0,
'넘버3': 4.0},
}
def drawGraph(data, name1, name2):
    plt.figure(figsize=(14,8))
    #define lists that store the coordinates to plot
    li = []#stores name1's ratings
    li2 =[]#stores name2's ratings
    for i in critics[name1]:
        if i in data[name2]: #if both rated the same movie
            li.append(critics[name1][i])#name1's rating for movie i
            li2.append(critics[name2][i])
            plt.text(critics[name1][i],critics[name2][i],i)
    plt.plot(li,li2, 'ro')
    plt.axis([0,6,0,6])
    plt.xlabel(name1)
    plt.ylabel(name2)
    # plt.show()
drawGraph(critics, 'BTS','유성룡')
drawGraph(critics, '이황','조용필') #이황 and 조용필 show a high correlation coefficient
## Pearson correlation coefficient: describes, on a -1~1 scale, how x and y vary together; (degree to which x and y vary together (covariance))/(degree x varies * degree y varies)
def sim_pearson(data, name1, name2):
    sumX=0 #sum of x
    sumY=0 #sum of y
    sumPowX=0 #sum of x squared
    sumPowY=0 #sum of y squared
    sumXY=0 #sum of X*Y
    count=0 #number of movies (n)
    for i in data[name1]:
        if i in data[name2]:#movies both people have seen (e.g. BTS and 유성룡)
            sumX+=data[name1][i]#name1's rating for movie i
            sumY+=data[name2][i]#name2's rating for movie i
            sumPowX+=pow(data[name1][i],2)
            sumPowY+= pow(data[name2][i],2)
            sumXY+=data[name1][i]*data[name2][i]
            count+=1
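    # Pearson r = covariance(x,y) / (std(x) * std(y)), expanded into raw sums:
    # r = (Σxy - Σx*Σy/n) / sqrt((Σx² - (Σx)²/n) * (Σy² - (Σy)²/n))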
return (sumXY - ((sumX * sumY) / count)) / \
sqrt((sumPowX - (pow(sumX, 2) / count)) *
(sumPowY - (pow(sumY, 2) / count)))
print("BTS와 유성룡 피어슨 상관계수:", sim_pearson(critics,'BTS','유성룡'))
print("이황과 조용필 피어슨 상관계수:",sim_pearson(critics,'이황','조용필'))
#iterate over the dictionary and compute the correlation between the reference person (BTS) and the other data (remaining viewers) -> sort in descending order
def top_match(data, name, index=2, sim_function=sim_pearson):
    #data: movie-rating dictionary, name: the reference person's name, index: how many of the top (closest) people by Pearson correlation to extract
    #calls the Pearson function
    li = []
    for i in data: #iterate over every person
        if name !=i: #if it is not oneself (BTS)
            li.append((sim_function(data, name, i),i))
    li.sort()
    li.reverse()
    return li[:index]
#extract the 3 people with tastes most similar to BTS
print(top_match(critics, 'BTS',3))
#build a movie recommendation system and print the expected ratings
"""
*Recommendation system construction steps*
1) Compute the rating similarity with everyone except oneself
   estimated rating = similarity * (another person's) movie rating
2) Compute the sum of the estimated ratings
3) Sum of estimated ratings / sum of similarities => yields the expected rating based on everyone
4) For each movie not yet seen, compute the expected rating and recommend the movie with the highest one.
"""
def getRecommendation(data, person, sim_function=sim_pearson):
    li=[] #list that is finally returned as the result
    score_dic={} #dictionary storing the running sum of similarity-weighted ratings
    sim_dic={}#dictionary storing the running sum of similarities
    score = 0
    result = top_match(data, person, len(data))
    print("intermediate:",result)
    for sim, name in result: #similarity, name
        if sim<0: continue #exclude people with a negative correlation
        for movie in data[name]:
            # print(name, "movie:",movie)
            if movie not in data[person]:#a movie someone else saw that is not in person's list, i.e. a movie person has not yet seen
                score+=sim*data[name][movie] #accumulate (similarity * the other person's rating) into score
                score_dic.setdefault(movie,0) #initialize the score for each movie (dictionary)
                score_dic[movie]+=score #running total of weighted ratings
                #running total of similarities
                sim_dic.setdefault(movie,0)
                sim_dic[movie] +=sim
            score = 0
    for key in score_dic:
        score_dic[key] = score_dic[key]/sim_dic[key] # sum of weighted ratings / sum of similarities
        li.append((score_dic[key], key))
    li.sort()
    li.reverse()
    return li[0][1]
print("For 이이, the most recommended movie is: ",getRecommendation(critics,'이이'))
#extract the movies the reference person (이이) has not seen, compute the expected rating for each => recommend the movie with the highest expected rating
# movie="가나다라"
# score_dic={}
# score_dic.setdefault(movie,0)
# print(score_dic)
# output result ==> {'가나다라': 0}
|
normal
|
{
"blob_id": "b377a652eec55b03f689a5097bf741b18549cba0",
"index": 4939,
"step-1": "<mask token>\n\n\ndef drawGraph(data, name1, name2):\n plt.figure(figsize=(14, 8))\n li = []\n li2 = []\n for i in critics[name1]:\n if i in data[name2]:\n li.append(critics[name1][i])\n li2.append(critics[name2][i])\n plt.text(critics[name1][i], critics[name2][i], i)\n plt.plot(li, li2, 'ro')\n plt.axis([0, 6, 0, 6])\n plt.xlabel(name1)\n plt.ylabel(name2)\n\n\n<mask token>\n\n\ndef sim_pearson(data, name1, name2):\n sumX = 0\n sumY = 0\n sumPowX = 0\n sumPowY = 0\n sumXY = 0\n count = 0\n for i in data[name1]:\n if i in data[name2]:\n sumX += data[name1][i]\n sumY += data[name2][i]\n sumPowX += pow(data[name1][i], 2)\n sumPowY += pow(data[name2][i], 2)\n sumXY += data[name1][i] * data[name2][i]\n count += 1\n return (sumXY - sumX * sumY / count) / sqrt((sumPowX - pow(sumX, 2) /\n count) * (sumPowY - pow(sumY, 2) / count))\n\n\n<mask token>\n\n\ndef top_match(data, name, index=2, sim_function=sim_pearson):\n li = []\n for i in data:\n if name != i:\n li.append((sim_function(data, name, i), i))\n li.sort()\n li.reverse()\n return li[:index]\n\n\n<mask token>\n\n\ndef getRecommendation(data, person, sim_function=sim_pearson):\n li = []\n score_dic = {}\n sim_dic = {}\n score = 0\n result = top_match(data, person, len(data))\n print('중간:', result)\n for sim, name in result:\n if sim < 0:\n continue\n for movie in data[name]:\n if movie not in data[person]:\n score += sim * data[name][movie]\n score_dic.setdefault(movie, 0)\n score_dic[movie] += score\n sim_dic.setdefault(movie, 0)\n sim_dic[movie] += sim\n score = 0\n for key in score_dic:\n score_dic[key] = score_dic[key] / sim_dic[key]\n li.append((score_dic[key], key))\n li.sort()\n li.reverse()\n return li[0][1]\n\n\n<mask token>\n",
"step-2": "<mask token>\nrc('font', family=font_name)\n<mask token>\n\n\ndef drawGraph(data, name1, name2):\n plt.figure(figsize=(14, 8))\n li = []\n li2 = []\n for i in critics[name1]:\n if i in data[name2]:\n li.append(critics[name1][i])\n li2.append(critics[name2][i])\n plt.text(critics[name1][i], critics[name2][i], i)\n plt.plot(li, li2, 'ro')\n plt.axis([0, 6, 0, 6])\n plt.xlabel(name1)\n plt.ylabel(name2)\n\n\ndrawGraph(critics, 'BTS', '유성룡')\ndrawGraph(critics, '이황', '조용필')\n\n\ndef sim_pearson(data, name1, name2):\n sumX = 0\n sumY = 0\n sumPowX = 0\n sumPowY = 0\n sumXY = 0\n count = 0\n for i in data[name1]:\n if i in data[name2]:\n sumX += data[name1][i]\n sumY += data[name2][i]\n sumPowX += pow(data[name1][i], 2)\n sumPowY += pow(data[name2][i], 2)\n sumXY += data[name1][i] * data[name2][i]\n count += 1\n return (sumXY - sumX * sumY / count) / sqrt((sumPowX - pow(sumX, 2) /\n count) * (sumPowY - pow(sumY, 2) / count))\n\n\nprint('BTS와 유성룡 피어슨 상관계수:', sim_pearson(critics, 'BTS', '유성룡'))\nprint('이황과 조용필 피어슨 상관계수:', sim_pearson(critics, '이황', '조용필'))\n\n\ndef top_match(data, name, index=2, sim_function=sim_pearson):\n li = []\n for i in data:\n if name != i:\n li.append((sim_function(data, name, i), i))\n li.sort()\n li.reverse()\n return li[:index]\n\n\nprint(top_match(critics, 'BTS', 3))\n<mask token>\n\n\ndef getRecommendation(data, person, sim_function=sim_pearson):\n li = []\n score_dic = {}\n sim_dic = {}\n score = 0\n result = top_match(data, person, len(data))\n print('중간:', result)\n for sim, name in result:\n if sim < 0:\n continue\n for movie in data[name]:\n if movie not in data[person]:\n score += sim * data[name][movie]\n score_dic.setdefault(movie, 0)\n score_dic[movie] += score\n sim_dic.setdefault(movie, 0)\n sim_dic[movie] += sim\n score = 0\n for key in score_dic:\n score_dic[key] = score_dic[key] / sim_dic[key]\n li.append((score_dic[key], key))\n li.sort()\n li.reverse()\n return li[0][1]\n\n\nprint('이이님에게는 ', getRecommendation(critics, '이이'), '영화를 가장 추천합니다.')\n",
"step-3": "<mask token>\nmpl.rcParams['axes.unicode_minus'] = False\n<mask token>\nfont_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf'\n ).get_name()\nrc('font', family=font_name)\ncritics = {'조용필': {'택시운전사': 2.5, '겨울왕국': 3.5, '리빙라스베가스': 3.0, '넘버3': 3.5,\n '사랑과전쟁': 2.5, '세계대전': 3.0}, 'BTS': {'택시운전사': 1.0, '겨울왕국': 4.5,\n '리빙라스베가스': 0.5, '넘버3': 1.5, '사랑과전쟁': 4.5, '세계대전': 5.0}, '강감찬': {'택시운전사':\n 3.0, '겨울왕국': 3.5, '리빙라스베가스': 1.5, '넘버3': 5.0, '세계대전': 3.0, '사랑과전쟁': 3.5\n }, '을지문덕': {'택시운전사': 2.5, '겨울왕국': 3.0, '넘버3': 3.5, '세계대전': 4.0}, '김유신':\n {'겨울왕국': 3.5, '리빙라스베가스': 3.0, '세계대전': 4.5, '넘버3': 4.0, '사랑과전쟁': 2.5},\n '유성룡': {'택시운전사': 3.0, '겨울왕국': 4.0, '리빙라스베가스': 2.0, '넘버3': 3.0, '세계대전': \n 3.5, '사랑과전쟁': 2.0}, '이황': {'택시운전사': 3.0, '겨울왕국': 4.0, '세계대전': 3.0,\n '넘버3': 5.0, '사랑과전쟁': 3.5}, '이이': {'겨울왕국': 4.5, '사랑과전쟁': 1.0, '넘버3': 4.0}}\n\n\ndef drawGraph(data, name1, name2):\n plt.figure(figsize=(14, 8))\n li = []\n li2 = []\n for i in critics[name1]:\n if i in data[name2]:\n li.append(critics[name1][i])\n li2.append(critics[name2][i])\n plt.text(critics[name1][i], critics[name2][i], i)\n plt.plot(li, li2, 'ro')\n plt.axis([0, 6, 0, 6])\n plt.xlabel(name1)\n plt.ylabel(name2)\n\n\ndrawGraph(critics, 'BTS', '유성룡')\ndrawGraph(critics, '이황', '조용필')\n\n\ndef sim_pearson(data, name1, name2):\n sumX = 0\n sumY = 0\n sumPowX = 0\n sumPowY = 0\n sumXY = 0\n count = 0\n for i in data[name1]:\n if i in data[name2]:\n sumX += data[name1][i]\n sumY += data[name2][i]\n sumPowX += pow(data[name1][i], 2)\n sumPowY += pow(data[name2][i], 2)\n sumXY += data[name1][i] * data[name2][i]\n count += 1\n return (sumXY - sumX * sumY / count) / sqrt((sumPowX - pow(sumX, 2) /\n count) * (sumPowY - pow(sumY, 2) / count))\n\n\nprint('BTS와 유성룡 피어슨 상관계수:', sim_pearson(critics, 'BTS', '유성룡'))\nprint('이황과 조용필 피어슨 상관계수:', sim_pearson(critics, '이황', '조용필'))\n\n\ndef top_match(data, name, index=2, sim_function=sim_pearson):\n li = []\n for i in data:\n if name != i:\n li.append((sim_function(data, name, i), i))\n li.sort()\n li.reverse()\n return li[:index]\n\n\nprint(top_match(critics, 'BTS', 3))\n<mask token>\n\n\ndef getRecommendation(data, person, sim_function=sim_pearson):\n li = []\n score_dic = {}\n sim_dic = {}\n score = 0\n result = top_match(data, person, len(data))\n print('중간:', result)\n for sim, name in result:\n if sim < 0:\n continue\n for movie in data[name]:\n if movie not in data[person]:\n score += sim * data[name][movie]\n score_dic.setdefault(movie, 0)\n score_dic[movie] += score\n sim_dic.setdefault(movie, 0)\n sim_dic[movie] += sim\n score = 0\n for key in score_dic:\n score_dic[key] = score_dic[key] / sim_dic[key]\n li.append((score_dic[key], key))\n li.sort()\n li.reverse()\n return li[0][1]\n\n\nprint('이이님에게는 ', getRecommendation(critics, '이이'), '영화를 가장 추천합니다.')\n",
"step-4": "<mask token>\nimport matplotlib as mpl\nmpl.rcParams['axes.unicode_minus'] = False\nfrom matplotlib import font_manager, rc\nimport matplotlib.pyplot as plt\nfrom math import sqrt\nfont_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf'\n ).get_name()\nrc('font', family=font_name)\ncritics = {'조용필': {'택시운전사': 2.5, '겨울왕국': 3.5, '리빙라스베가스': 3.0, '넘버3': 3.5,\n '사랑과전쟁': 2.5, '세계대전': 3.0}, 'BTS': {'택시운전사': 1.0, '겨울왕국': 4.5,\n '리빙라스베가스': 0.5, '넘버3': 1.5, '사랑과전쟁': 4.5, '세계대전': 5.0}, '강감찬': {'택시운전사':\n 3.0, '겨울왕국': 3.5, '리빙라스베가스': 1.5, '넘버3': 5.0, '세계대전': 3.0, '사랑과전쟁': 3.5\n }, '을지문덕': {'택시운전사': 2.5, '겨울왕국': 3.0, '넘버3': 3.5, '세계대전': 4.0}, '김유신':\n {'겨울왕국': 3.5, '리빙라스베가스': 3.0, '세계대전': 4.5, '넘버3': 4.0, '사랑과전쟁': 2.5},\n '유성룡': {'택시운전사': 3.0, '겨울왕국': 4.0, '리빙라스베가스': 2.0, '넘버3': 3.0, '세계대전': \n 3.5, '사랑과전쟁': 2.0}, '이황': {'택시운전사': 3.0, '겨울왕국': 4.0, '세계대전': 3.0,\n '넘버3': 5.0, '사랑과전쟁': 3.5}, '이이': {'겨울왕국': 4.5, '사랑과전쟁': 1.0, '넘버3': 4.0}}\n\n\ndef drawGraph(data, name1, name2):\n plt.figure(figsize=(14, 8))\n li = []\n li2 = []\n for i in critics[name1]:\n if i in data[name2]:\n li.append(critics[name1][i])\n li2.append(critics[name2][i])\n plt.text(critics[name1][i], critics[name2][i], i)\n plt.plot(li, li2, 'ro')\n plt.axis([0, 6, 0, 6])\n plt.xlabel(name1)\n plt.ylabel(name2)\n\n\ndrawGraph(critics, 'BTS', '유성룡')\ndrawGraph(critics, '이황', '조용필')\n\n\ndef sim_pearson(data, name1, name2):\n sumX = 0\n sumY = 0\n sumPowX = 0\n sumPowY = 0\n sumXY = 0\n count = 0\n for i in data[name1]:\n if i in data[name2]:\n sumX += data[name1][i]\n sumY += data[name2][i]\n sumPowX += pow(data[name1][i], 2)\n sumPowY += pow(data[name2][i], 2)\n sumXY += data[name1][i] * data[name2][i]\n count += 1\n return (sumXY - sumX * sumY / count) / sqrt((sumPowX - pow(sumX, 2) /\n count) * (sumPowY - pow(sumY, 2) / count))\n\n\nprint('BTS와 유성룡 피어슨 상관계수:', sim_pearson(critics, 'BTS', '유성룡'))\nprint('이황과 조용필 피어슨 상관계수:', sim_pearson(critics, '이황', '조용필'))\n\n\ndef top_match(data, name, index=2, sim_function=sim_pearson):\n li = []\n for i in data:\n if name != i:\n li.append((sim_function(data, name, i), i))\n li.sort()\n li.reverse()\n return li[:index]\n\n\nprint(top_match(critics, 'BTS', 3))\n<mask token>\n\n\ndef getRecommendation(data, person, sim_function=sim_pearson):\n li = []\n score_dic = {}\n sim_dic = {}\n score = 0\n result = top_match(data, person, len(data))\n print('중간:', result)\n for sim, name in result:\n if sim < 0:\n continue\n for movie in data[name]:\n if movie not in data[person]:\n score += sim * data[name][movie]\n score_dic.setdefault(movie, 0)\n score_dic[movie] += score\n sim_dic.setdefault(movie, 0)\n sim_dic[movie] += sim\n score = 0\n for key in score_dic:\n score_dic[key] = score_dic[key] / sim_dic[key]\n li.append((score_dic[key], key))\n li.sort()\n li.reverse()\n return li[0][1]\n\n\nprint('이이님에게는 ', getRecommendation(critics, '이이'), '영화를 가장 추천합니다.')\n",
"step-5": "#상관분석\r\n\"\"\"\r\n유클리디안 거리 공식의 한계점: 특정인의 점수가 극단적으로 높거나 낮다면 제대로된 결과를 도출해내기 어렵다.\r\n=>상관분석:두 변수간의 선형적 관계를 분석하겠다는 의미\r\n\"\"\"\r\n#BTS와 유성룡 평점, 이황, 조용필\r\nimport matplotlib as mpl\r\nmpl.rcParams['axes.unicode_minus']=False #한글 깨짐 방지\r\nfrom matplotlib import font_manager, rc\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\nfont_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf').get_name()\r\nrc('font',family=font_name)\r\n\r\ncritics = {\r\n '조용필': {\r\n '택시운전사': 2.5,\r\n '겨울왕국': 3.5,\r\n '리빙라스베가스': 3.0,\r\n '넘버3': 3.5,\r\n '사랑과전쟁': 2.5,\r\n '세계대전': 3.0,\r\n },\r\n 'BTS': {\r\n '택시운전사': 1.0,\r\n '겨울왕국': 4.5,\r\n '리빙라스베가스': 0.5,\r\n '넘버3': 1.5,\r\n '사랑과전쟁': 4.5,\r\n '세계대전': 5.0,\r\n },\r\n '강감찬': {\r\n '택시운전사': 3.0,\r\n '겨울왕국': 3.5,\r\n '리빙라스베가스': 1.5,\r\n '넘버3': 5.0,\r\n '세계대전': 3.0,\r\n '사랑과전쟁': 3.5,\r\n },\r\n '을지문덕': {\r\n '택시운전사': 2.5,\r\n '겨울왕국': 3.0,\r\n '넘버3': 3.5,\r\n '세계대전': 4.0,\r\n },\r\n '김유신': {\r\n '겨울왕국': 3.5,\r\n '리빙라스베가스': 3.0,\r\n '세계대전': 4.5,\r\n '넘버3': 4.0,\r\n '사랑과전쟁': 2.5,\r\n },\r\n '유성룡': {\r\n '택시운전사': 3.0,\r\n '겨울왕국': 4.0,\r\n '리빙라스베가스': 2.0,\r\n '넘버3': 3.0,\r\n '세계대전': 3.5,\r\n '사랑과전쟁': 2.0,\r\n },\r\n '이황': {\r\n '택시운전사': 3.0,\r\n '겨울왕국': 4.0,\r\n '세계대전': 3.0,\r\n '넘버3': 5.0,\r\n '사랑과전쟁': 3.5,\r\n },\r\n '이이': {'겨울왕국': 4.5, '사랑과전쟁': 1.0,\r\n '넘버3': 4.0},\r\n}\r\n\r\n\r\ndef drawGraph(data, name1, name2):\r\n plt.figure(figsize=(14,8))\r\n#plot하기 위한 좌표를 저장하는 list 정의\r\n li = []#name1의 평점을 저장\r\n li2 =[]#name2의 평점을 저장\r\n for i in critics[name1]:\r\n if i in data[name2]: #같은 영화에 대한 평점이 있다면\r\n li.append(critics[name1][i])#name1의 i영화에 대한 평점\r\n li2.append(critics[name2][i])\r\n plt.text(critics[name1][i],critics[name2][i],i)\r\n plt.plot(li,li2, 'ro')\r\n plt.axis([0,6,0,6])\r\n plt.xlabel(name1)\r\n plt.ylabel(name2)\r\n # plt.show()\r\n\r\ndrawGraph(critics, 'BTS','유성룡')\r\ndrawGraph(critics, '이황','조용필') #이황과 조용필의 상관계수가 높게 나옴\r\n\r\n## 피어슨 상관계수:x,y의 변화하는 정도를 -1~1 사이로 기술한 통계치, x,y가 함께 변화하는 정도(공분산)/(x가 변하는 정도*y가 변하는 정도)\r\ndef sim_pearson(data, name1, name2):\r\n sumX=0 #x의 합\r\n sumY=0 #y의 합\r\n sumPowX=0 #x제곱의 합\r\n sumPowY=0 #y제곱의 합\r\n sumXY=0 #X*Y의 합\r\n count=0 #영화의개수(n)\r\n\r\n for i in data[name1]:\r\n if i in data[name2]:#BTS와 유성룡이 모두 본 영화\r\n sumX+=data[name1][i]#BTS의 i영화에 대한 평점\r\n sumY+=data[name2][i]#유성룡의 i영화에 대한 평점\r\n sumPowX+=pow(data[name1][i],2)\r\n sumPowY+= pow(data[name2][i],2)\r\n sumXY+=data[name1][i]*data[name2][i]\r\n count+=1\r\n return (sumXY - ((sumX * sumY) / count)) / \\\r\n sqrt((sumPowX - (pow(sumX, 2) / count)) *\r\n (sumPowY - (pow(sumY, 2) / count)))\r\nprint(\"BTS와 유성룡 피어슨 상관계수:\", sim_pearson(critics,'BTS','유성룡'))\r\nprint(\"이황과 조용필 피어슨 상관계수:\",sim_pearson(critics,'이황','조용필'))\r\n\r\n#딕셔너리를 수행하면서 기준(BTS)과 다른 데이터(그외 관객)와의 상관계수->내림차순 정렬\r\ndef top_match(data, name, index=2, sim_function=sim_pearson):\r\n#data:영화평점 딕셔너리, name:기준이 되는 사람의 이름, index:피어슨 상관계수에서 상위(가장 가까운)몇명을 추출\r\n#피어슨 함수 호출\r\n li = []\r\n for i in data: #전체영화를 돌겠다\r\n if name !=i: #BTS, 자신이 아니라면\r\n li.append((sim_function(data, name, i),i))\r\n li.sort()\r\n li.reverse()\r\n return li[:index]\r\n#BTS와 성향이 가장 비슷한 3명 추출\r\nprint(top_match(critics, 'BTS',3))\r\n\r\n#영화를 추천하는 시스템 구성, 예상되는 평점 출력\r\n\"\"\"\r\n*추천 시스템 구성 순서*\r\n1)자신을 제외한 나머지 사람들과의 평점에 대한 유사도를 구함\r\n추측되는 평점 = 유사도*(다른사람의)영화평점\r\n2)추측되는 평점들의 총합을 구함\r\n3)추측되는 평점들의 총합/유사도의 총합 =>모든 사람들을 근거로 했을 때 예상되는 평점이 추출됨\r\n4)아직 안본 영화를 대상으로 예상되는 평점을 구하여, 예상되는 평점이 가장 높은 영화를 추천.\r\n\"\"\"\r\n\r\ndef getRecommendation(data, person, sim_function=sim_pearson):\r\n li=[] #결과를 최종적으로 리턴하는 
리스트\r\n score_dic={} #유사도의 총합을 저장하기 위한 딕셔너리\r\n sim_dic={}#평점의 총합을 저장하기 위한 딕셔너리\r\n score = 0\r\n result = top_match(data, person, len(data))\r\n print(\"중간:\",result)\r\n for sim, name in result: #유사도, 이름\r\n if sim<0: continue #음의 상관관계를 갖고 있는 사람들 제외\r\n for movie in data[name]:\r\n # print(name, \"movie:\",movie)\r\n if movie not in data[person]:#다른사람들이 본 영화가 이이가 본영화 목록에 없다면 즉, 이이가 안본영화\r\n score+=sim*data[name][movie] #score변수에 (유사도*이이가 아닌 다름 사람의 영화 평점) 누적\r\n score_dic.setdefault(movie,0) #각 무비에 대한 점수 초기화(딕셔너리)\r\n score_dic[movie]+=score #평점 총합\r\n\r\n #유사도의 누적합\r\n sim_dic.setdefault(movie,0)\r\n sim_dic[movie] +=sim\r\n score = 0\r\n\r\n for key in score_dic:\r\n score_dic[key] = score_dic[key]/sim_dic[key] # 평점들의 총합/유사도의 총합\r\n li.append((score_dic[key], key))\r\n li.sort()\r\n li.reverse()\r\n return li[0][1]\r\n\r\n\r\nprint(\"이이님에게는 \",getRecommendation(critics,'이이'),\"영화를 가장 추천합니다.\")\r\n#기준이 되는 사람(이이)가 안본 영화를 추출, 안본 영화 각각에 대한 예상 평점 추출 => 예상평점이 가장 큰 영화를 추천\r\n\r\n\r\n# movie=\"가나다라\"\r\n# score_dic={}\r\n# score_dic.setdefault(movie,0)\r\n# print(score_dic)\r\n# 출력 결과 ==> {'가나다라': 0}",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
"""
This is the very first A.I. in this series.
The vision is to develop a 'protocol droid' to talk to, to help with tasks, and with whom to play games.
The droid will be able to translate languages and connect people.
"""
import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
import wikipedia
import dadjokes
listener = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
engine.say("hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.")
#engine.say("hey, .")
engine.runAndWait()
print("I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).")
def talk(text):
    engine.say("heyo " + text)
    engine.runAndWait()
def take_command():
    command = ""
    try:
        with sr.Microphone() as source:
            print('listening....')
            voice = listener.listen(source)
            command = listener.recognize_google(voice)
            command = command.lower()
            if 'lisa' in command:
                command = command.replace('lisa','')
    except:
        print("something went wrong")
    return command
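# A small optional helper (an assumption, not in the original script):
# SpeechRecognition's adjust_for_ambient_noise() calibrates the energy
# threshold once at startup, which tends to improve recognition accuracy.
def calibrate_microphone(duration=0.5):
    with sr.Microphone() as source:
        listener.adjust_for_ambient_noise(source, duration=duration)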
def run_lisa():
command = take_command()
if 'play' in command:
song = command.replace('play','')
talk('hey playing' + song)
print('playing...'+ song)
pywhatkit.playonyt(song)
    elif 'time' in command:
        # %H %M already yields 24-hour ("military") time; for a friendlier
        # phrasing, strftime('%I:%M %p') would give e.g. "02:30 PM".
        time = datetime.datetime.now().strftime('%H %M')
        talk('Right now it is '+time)
elif "teach me about" in command:
info = command.replace('teach me about','')
teach = wikipedia.summary(info,2)
print(teach)
talk(teach)
elif "tell me more about" in command:
info = command.replace('tell me more about','')
teach = wikipedia.summary(info,6)
print(teach)
talk(teach)
elif "joke" in command:
talk(dadjokes.joke())
elif "good one" in command:
talk("yeah thanks! I'll be here all week folks!")
while True:
run_lisa()
|
normal
|
{
"blob_id": "f5d353694a719472320f4d6fa28bc9d2cc5a69b0",
"index": 951,
"step-1": "<mask token>\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\n<mask token>\n",
"step-2": "<mask token>\nengine.setProperty('voice', voices[1].id)\nengine.say(\n \"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\"\n )\nengine.runAndWait()\nprint(\n \"\"\"I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).\"\"\"\n )\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\nwhile True:\n run_lisa()\n",
"step-3": "<mask token>\nlistener = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\nengine.say(\n \"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\"\n )\nengine.runAndWait()\nprint(\n \"\"\"I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).\"\"\"\n )\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\nwhile True:\n run_lisa()\n",
"step-4": "<mask token>\nimport speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\nimport wikipedia\nimport dadjokes\nlistener = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\nengine.say(\n \"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\"\n )\nengine.runAndWait()\nprint(\n \"\"\"I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).\"\"\"\n )\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\nwhile True:\n run_lisa()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis is the very first A.I. in this series. \r\nThe vision is to devlop 'protocol droid' to talk to, to help with tasks, and with whom to play games.\r\nThe droid will be able to translate langages and connect ppl.\r\n\r\n\r\n\"\"\"\r\n\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\nimport pywhatkit\r\nimport datetime\r\nimport wikipedia\r\nimport dadjokes\r\n\r\n\r\nlistener = sr.Recognizer()\r\nengine = pyttsx3.init()\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[1].id)\r\nengine.say(\"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\")\r\n#engine.say(\"hey, .\")\r\nengine.runAndWait()\r\nprint(\"I can play videos (Lisa, play....),\\n teach (Lisa, teach me about...),\\n tell you more (Lisa, tell me more about...),\\n tell time (Lisa, what time is it),\\n and tell jokes (Lisa, tell me a joke...).\")\r\n\r\ndef talk(text):\r\n engine.say(\"heyo\"+text)\r\n engine.runAndWait()\r\n\r\n\r\n\r\ndef take_command():\r\n\r\n try:\r\n with sr.Microphone() as source:\r\n print('listening....')\r\n voice = listener.listen(source)\r\n command = listener.recognize_google(voice)\r\n command = command.lower()\r\n if 'lisa' in command:\r\n command = command.replace('lisa','')\r\n \r\n \r\n \r\n \r\n \r\n except:\r\n print(\"something went wrong\")\r\n \r\n \r\n return command\r\ndef run_lisa():\r\n command = take_command()\r\n if 'play' in command:\r\n song = command.replace('play','')\r\n talk('hey playing' + song)\r\n print('playing...'+ song)\r\n pywhatkit.playonyt(song)\r\n elif 'time' in command:\r\n #needs a more natural way of expressing time\r\n #i would like mil time\r\n time = datetime.datetime.now().strftime('%H %M')\r\n talk('Right now it is '+time)\r\n elif \"teach me about\" in command:\r\n info = command.replace('teach me about','')\r\n teach = wikipedia.summary(info,2)\r\n print(teach)\r\n talk(teach)\r\n elif \"tell me more about\" in command:\r\n info = command.replace('tell me more about','')\r\n teach = wikipedia.summary(info,6)\r\n print(teach)\r\n talk(teach)\r\n elif \"joke\" in command:\r\n talk(dadjokes.joke())\r\n elif \"good one\" in command:\r\n talk(\"yeah thanks! I'll be here all week folks!\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nwhile True: \r\n run_lisa()\r\n ",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
#coding=utf-8
from numpy import *
#Listing 5-1: Logistic regression gradient ascent optimization algorithm.
def loadDataSet():
    """Parse the data file.
    Return: dataMat, the record list [[1,x1,x2]...]; labelMat, the class-label list [1,0,1...]
    @author:VPrincekin
    """
    dataMat = []; labelMat = []
    fr = open('testSet.txt')
    #The first two values on each line are X1 and X2; the third is the record's class label.
    for line in fr.readlines():
        #strip() removes surrounding whitespace
        lineArr = line.strip().split()
        #Set X0 to 1.0 to simplify the computation.
        dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat,labelMat
def sigmoid(inX):
    """The sigmoid function.
    @author:VPrincekin
    """
    return 1/(1+exp(-inX))
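# Note (a general numerical caveat, not part of the original listing):
# exp(-inX) overflows for large negative inputs. A safer drop-in variant
# clips the argument first:
# def sigmoid_stable(inX):
#     return 1.0/(1.0 + exp(-clip(inX, -500, 500)))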
def gradAscent(dataMatIn,classLabels):
    """Gradient ascent algorithm.
    Args: dataMatIn, the data as a 100*3 matrix; classLabels, the class labels as a 1*100 vector
    Return: weights, the matrix of regression coefficients
    @author:VPrincekin
    """
    #mat() converts to the NumPy matrix type
    dataMatrix = mat(dataMatIn)
    #transpose() transposes the matrix
    labelMat = mat(classLabels).transpose()
    #shape() returns the matrix dimensions (rows, columns)
    m,n = shape(dataMatrix)
    #alpha: step size towards the target
    alpha = 0.001
    #maxCycles: number of iterations
    maxCycles = 500
    #create an n*1 matrix of ones
    weights = ones((n,1))
    #iterate: gradient ascent
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)
        error = (labelMat - h)
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights
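# Shape check (follows from the code above): dataMatrix is m*n (100*3 here)
# and weights is n*1, so h = sigmoid(dataMatrix * weights) is m*1, matching
# labelMat; the update multiplies an n*m transpose by an m*1 error vector,
# giving an n*1 gradient step.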
######################################################################################
#Listing 5-2: plot the data set and the Logistic regression best-fit line.
def plotBestFit(weights):
    """
    Args: weights, the regression coefficients
    @author:VPrincekin
    """
    import matplotlib.pyplot as plt
    #parse the file into the record matrix and the class-label list
    dataMat,labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
    #draw the figure
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')
    ax.scatter(xcord2,ycord2,s=30,c='green')
    x = arange(-3.0,3.0,0.1)
    #Set the sigmoid argument to 0; that is the boundary between the two classes: w0x0+w1x1+w2x2=0.
    y = (-weights[0]-weights[1]*x)/weights[2]
    ax.plot(x,y)
    plt.xlabel('X1'); plt.ylabel('X2');
    plt.show()
##############################################################################################
#Listing 5-3: stochastic gradient ascent.
def stocGradAscent0(dataMatrix,classLabels):
    """
    Args: dataMatrix, the record list; classLabels, the class-label list
    Return: weights, the regression coefficients
    @author:VPrincekin
    """
    m,n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)
    for i in range(m):
        #evaluate the hypothesis for a single sample
        h = sigmoid(sum(dataMatrix[i]*weights))
        #compute the error
        error = classLabels[i]-h
        #update the weights along the gradient
        weights = weights + alpha*error*dataMatrix[i]
    return weights
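# Unlike gradAscent above, this makes a single O(m) pass and works on plain
# NumPy arrays, so dataMatrix[i]*weights is an element-wise product reduced
# by sum() rather than a matrix multiplication.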
##############################################################################################
#Listing 5-4: improved stochastic gradient ascent.
def stocGradAscent1(dataMatrix,classLabels,numIter=150):
    """
    Args: dataMatrix, the record list; classLabels, the class-label list; numIter, the number of iterations (150 if not given).
    Return: weights, the regression coefficients
    @author:VPrincekin
    """
    m,n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))
        for i in range(m):
            #First improvement: alpha is adjusted on every iteration, which damps
            #the oscillations caused by noisy or high-frequency samples.
            alpha = 4/(1.0+i+j)+0.01
            #Second improvement: update the coefficients using a randomly chosen sample.
            #This reduces periodic fluctuations: pick a random index from the list,
            #then delete it so each sample is used once per pass.
            randIndex=int(random.uniform(0,len(dataIndex)))
            h = sigmoid(sum(dataMatrix[dataIndex[randIndex]]*weights))
            error = classLabels[dataIndex[randIndex]] - h
            weights = weights + alpha * error * dataMatrix[dataIndex[randIndex]]
            del(dataIndex[randIndex])
    return weights
########################################################################################################
#Listing 5-5: Logistic regression classification functions.
def classifyVector(inX,weights):
    """Classify a sample.
    Args: inX, the test sample; weights, the regression coefficients produced by training
    Return: the class, 0 or 1.
    @author:VPrincekin
    """
    prob = sigmoid(sum(inX*weights))
    if prob>0.5:
        return 1.0
    else:
        return 0.0
def colicTest():
    """Test the Logistic regression algorithm.
    Args: None
    Return: the error rate of the Logistic regression classifier
    """
    #Each sample has 21 features and one class label.
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []; trainingLabels = []
    #Parse the training file and compute the regression coefficient vector with stocGradAscent1().
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)
    #Parse the test file and compute the algorithm's error rate.
    errorCount = 0; numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = (float(errorCount)/numTestVec)
    print('the error rate of this test is : %f' % errorRate)
    return errorRate
def multiTest():
    """Call colicTest() multiple times and average the results.
    @author:VPrincekin
    """
    numTests = 10; errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print("after %d iterations the average error rate is : %f " %(numTests,errorSum/float(numTests)))
|
normal
|
{
"blob_id": "d47ea763ac1a4981fc5dee67cd396ad49570f923",
"index": 7821,
"step-1": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\n<mask token>\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\n<mask token>\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\n<mask token>\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\ndef classifyVector(inX, weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX * weights))\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10\n errorSum = 0.0\n for k in range(numTests):\n errorSum += colicTest()\n print('after %d iterations the average error rate is : %f ' % (numTests,\n errorSum / float(numTests)))\n",
"step-3": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\ndef gradAscent(dataMatIn, classLabels):\n \"\"\"梯度上升算法\n Args: dataMatIn 文档矩阵 100*3 的矩阵;classLabels 类别标签列表 1*100向量\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n dataMatrix = mat(dataMatIn)\n labelMat = mat(classLabels).transpose()\n m, n = shape(dataMatrix)\n alpha = 0.001\n maxCycles = 500\n weights = ones((n, 1))\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = labelMat - h\n weights = weights + alpha * dataMatrix.transpose() * error\n return weights\n\n\n<mask token>\n\n\ndef stocGradAscent0(dataMatrix, classLabels):\n \"\"\"\n Args: dataMatrix 文档列表; classLabels 类别标签列表\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n alpha = 0.01\n weights = ones(n)\n for i in range(m):\n h = sigmoid(sum(dataMatrix[i] * weights))\n error = classLabels[i] - h\n weights = weights + alpha * error * dataMatrix[i]\n return weights\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\ndef classifyVector(inX, weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX * weights))\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10\n errorSum = 0.0\n for k in range(numTests):\n errorSum += colicTest()\n print('after %d iterations the average error rate is : %f ' % (numTests,\n errorSum / float(numTests)))\n",
"step-4": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\ndef gradAscent(dataMatIn, classLabels):\n \"\"\"梯度上升算法\n Args: dataMatIn 文档矩阵 100*3 的矩阵;classLabels 类别标签列表 1*100向量\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n dataMatrix = mat(dataMatIn)\n labelMat = mat(classLabels).transpose()\n m, n = shape(dataMatrix)\n alpha = 0.001\n maxCycles = 500\n weights = ones((n, 1))\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = labelMat - h\n weights = weights + alpha * dataMatrix.transpose() * error\n return weights\n\n\ndef plotBestFit(weights):\n \"\"\"\n Args:weights 回归系数\n @author:VPrincekin\n \"\"\"\n import matplotlib.pyplot as plt\n dataMat, labelMat = loadDataSet()\n dataArr = array(dataMat)\n n = shape(dataArr)[0]\n xcord1 = []\n ycord1 = []\n xcord2 = []\n ycord2 = []\n for i in range(n):\n if int(labelMat[i]) == 1:\n xcord1.append(dataArr[i, 1])\n ycord1.append(dataArr[i, 2])\n else:\n xcord2.append(dataArr[i, 1])\n ycord2.append(dataArr[i, 2])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')\n ax.scatter(xcord2, ycord2, s=30, c='green')\n x = arange(-3.0, 3.0, 0.1)\n y = (-weights[0] - weights[1] * x) / weights[2]\n ax.plot(x, y)\n plt.xlabel('X1')\n plt.ylabel('X2')\n plt.show()\n\n\ndef stocGradAscent0(dataMatrix, classLabels):\n \"\"\"\n Args: dataMatrix 文档列表; classLabels 类别标签列表\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n alpha = 0.01\n weights = ones(n)\n for i in range(m):\n h = sigmoid(sum(dataMatrix[i] * weights))\n error = classLabels[i] - h\n weights = weights + alpha * error * dataMatrix[i]\n return weights\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\ndef classifyVector(inX, weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX * weights))\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = 
line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10\n errorSum = 0.0\n for k in range(numTests):\n errorSum += colicTest()\n print('after %d iterations the average error rate is : %f ' % (numTests,\n errorSum / float(numTests)))\n",
"step-5": "#coding=utf-8\nfrom numpy import *\n\n#代码5-1,Logistic回归梯度上升优化算法。\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []; labelMat= []\n fr = open('testSet.txt')\n #每行前两个分别是X1和X2,第三个只是数据对应的类别\n for line in fr.readlines():\n #strip()去除空格\n lineArr = line.strip().split()\n #为了方便计算,把X0设置为1。\n dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat,labelMat\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1/(1+exp(-inX))\n\ndef gradAscent(dataMatIn,classLabels):\n \"\"\"梯度上升算法\n Args: dataMatIn 文档矩阵 100*3 的矩阵;classLabels 类别标签列表 1*100向量\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\" \n #mat()转换为NumPy矩阵数据类型\n dataMatrix = mat(dataMatIn)\n #transpose()转置矩阵\n labelMat = mat(classLabels).transpose()\n #shape()求出矩阵的维度(行,列)\n m,n = shape(dataMatrix)\n #alpha 向目标移动的步长\n alpha = 0.001\n #maxCyles 迭代次数\n maxCycles = 500\n #创建一个n*1的单位矩阵\n weights = ones((n,1))\n #开始迭代,梯度上升\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = (labelMat - h)\n weights = weights + alpha * dataMatrix.transpose() * error\n return weights\n \n######################################################################################\n\n#代码5-2,画出数据集和Logistic回归最佳拟合直线的函数。\ndef plotBestFit(weights):\n \"\"\"\n Args:weights 回归系数\n @author:VPrincekin\n \"\"\"\n import matplotlib.pyplot as plt\n #解析文件,生成文档矩阵和类别标签矩阵\n dataMat,labelMat = loadDataSet()\n dataArr = array(dataMat)\n n = shape(dataArr)[0]\n xcord1 = []; ycord1 = []\n xcord2 = []; ycord2 = []\n for i in range(n):\n if int(labelMat[i]) == 1:\n xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])\n else:\n xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])\n #开始画图\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')\n ax.scatter(xcord2,ycord2,s=30,c='green')\n x = arange(-3.0,3.0,0.1)\n #此处设置了sigmoid函数为0,0是两个分类的分界处。w0x0+w1x1+w2x2=0\n y = (-weights[0]-weights[1]*x)/weights[2]\n ax.plot(x,y)\n plt.xlabel('X1'); plt.ylabel('X2');\n plt.show()\n \n##############################################################################################\n\n#代码5-3,随即梯度上升算法\ndef stocGradAscent0(dataMatrix,classLabels):\n \"\"\"\n Args: dataMatrix 文档列表; classLabels 类别标签列表\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m,n = shape(dataMatrix)\n alpha = 0.01\n weights = ones(n)\n for i in range(m):\n #计算每一个样本的函数值\n h = sigmoid(sum(dataMatrix[i]*weights))\n #计算误差\n error = classLabels[i]-h\n #向梯度方向更新迭代\n weights = weights + alpha*error*dataMatrix[i]\n return weights\n\n##############################################################################################\n\n#代码5-4,改进的随即梯度上升算法\ndef stocGradAscent1(dataMatrix,classLabels,numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m,n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter): \n dataIndex = range(m)\n for i in range(m):\n #第一处改进,alpha在每次迭代的时候都会调整,这会缓解数据波动或者高频波动。\n alpha = 4/(1.0+i+j)+0.01\n #第二处改进,通过随机选取样本来更新回归系数。\n #这种方法将减少周期性波动,每次随即从列表中选出一个值,然后从列表中删掉该值。\n randIndex=int(random.uniform(0,len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex]*weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n 
\n########################################################################################################\n \n#代码5-5,Logistic回归分类函数\ndef classifyVector(inX,weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX*weights))\n if prob>0.5:\n return 1.0\n else:\n return 0.0\n \ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n #每个样本有21个特征,一个类别。\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []; trainingLabels = []\n #开始解析训练文本,通过stocGradAscent1()计算并返回,回归系数向量。\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)\n #开始解析测试文本,计算算法的错误率。\n errorCount = 0; numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):\n errorCount += 1\n errorRate = (float(errorCount)/numTestVec)\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10; errorSum = 0.0 \n for k in range(numTests):\n errorSum += colicTest()\n print(\"after %d iterations the average error rate is : %f \" %(numTests,errorSum/float(numTests)))\n\n",
"step-ids": [
4,
6,
8,
9,
11
]
}
|
[
4,
6,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def to_path(key):
if isinstance(key, tuple):
if len(key) == 1:
key = key[0]
else:
raise ValueError(error_msg)
if '/' in key or '\\' in key or os.path.sep in key:
raise ValueError(error_msg)
return key,
def from_path(path):
if len(path) != 1:
raise ValueError(error_msg)
return path[0]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
error_msg = """The default transformer cannot handle slashes (subdirectories);
try another transformer in vlermv.transformers."""
def to_path(key):
if isinstance(key, tuple):
if len(key) == 1:
key = key[0]
else:
raise ValueError(error_msg)
if '/' in key or '\\' in key or os.path.sep in key:
raise ValueError(error_msg)
return key,
def from_path(path):
if len(path) != 1:
raise ValueError(error_msg)
return path[0]
<|reserved_special_token_1|>
import os
error_msg = """The default transformer cannot handle slashes (subdirectories);
try another transformer in vlermv.transformers."""
def to_path(key):
if isinstance(key, tuple):
if len(key) == 1:
key = key[0]
else:
raise ValueError(error_msg)
if '/' in key or '\\' in key or os.path.sep in key:
raise ValueError(error_msg)
return key,
def from_path(path):
if len(path) != 1:
raise ValueError(error_msg)
return path[0]
<|reserved_special_token_1|>
import os
error_msg = '''The default transformer cannot handle slashes (subdirectories);
try another transformer in vlermv.transformers.'''
def to_path(key):
if isinstance(key, tuple):
if len(key) == 1:
key = key[0]
else:
raise ValueError(error_msg)
if '/' in key or '\\' in key or os.path.sep in key:
raise ValueError(error_msg)
return (key,)
def from_path(path):
if len(path) != 1:
raise ValueError(error_msg)
return path[0]
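# Doctest-style sketch of the round trip implemented above:
# >>> to_path('report')
# ('report',)
# >>> from_path(('report',))
# 'report'
# >>> to_path('a/b')   # raises ValueError: slashes are not allowed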
|
flexible
|
{
"blob_id": "e4ff6d689a7da5b16786fd59d6a4707b9b6e3e7d",
"index": 8076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-3": "<mask token>\nerror_msg = \"\"\"The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.\"\"\"\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-4": "import os\nerror_msg = \"\"\"The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.\"\"\"\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-5": "import os\n\nerror_msg = '''The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.'''\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n\n return (key,)\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# Generated by Django 2.0.5 on 2018-07-12 11:08
import assessment.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assessment', '0006_auto_20180712_1428'),
]
operations = [
migrations.AlterModelManagers(
name='season',
managers=[
('objects', assessment.models.SeasonManager()),
],
),
migrations.AlterField(
model_name='punishmentreward',
name='method',
field=models.TextField(verbose_name='روش'),
),
migrations.AlterField(
model_name='scaleanswer',
name='carried_on',
field=models.BooleanField(default=False, verbose_name='انجام\u200cشده'),
),
migrations.AlterField(
model_name='scaleanswer',
name='qualitativeAnswer',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کیفی'),
),
migrations.AlterField(
model_name='scaleanswer',
name='quantitativeAnswer',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کمی'),
),
]
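    # Note (general Django behavior, not specific to this migration):
    # AlterModelManagers only updates the managers recorded in migration
    # state for use by historical models; it issues no SQL when applied.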
|
normal
|
{
"blob_id": "adff75857a1de24267e771c599e4d89486a6ad32",
"index": 7439,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('assessment', '0006_auto_20180712_1428')]\n operations = [migrations.AlterModelManagers(name='season', managers=[(\n 'objects', assessment.models.SeasonManager())]), migrations.\n AlterField(model_name='punishmentreward', name='method', field=\n models.TextField(verbose_name='روش')), migrations.AlterField(\n model_name='scaleanswer', name='carried_on', field=models.\n BooleanField(default=False, verbose_name='انجام\\u200cشده')),\n migrations.AlterField(model_name='scaleanswer', name=\n 'qualitativeAnswer', field=models.CharField(blank=True, max_length=\n 100, null=True, verbose_name='پاسخ کیفی')), migrations.AlterField(\n model_name='scaleanswer', name='quantitativeAnswer', field=models.\n CharField(blank=True, max_length=100, null=True, verbose_name=\n 'پاسخ کمی'))]\n",
"step-4": "import assessment.models\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('assessment', '0006_auto_20180712_1428')]\n operations = [migrations.AlterModelManagers(name='season', managers=[(\n 'objects', assessment.models.SeasonManager())]), migrations.\n AlterField(model_name='punishmentreward', name='method', field=\n models.TextField(verbose_name='روش')), migrations.AlterField(\n model_name='scaleanswer', name='carried_on', field=models.\n BooleanField(default=False, verbose_name='انجام\\u200cشده')),\n migrations.AlterField(model_name='scaleanswer', name=\n 'qualitativeAnswer', field=models.CharField(blank=True, max_length=\n 100, null=True, verbose_name='پاسخ کیفی')), migrations.AlterField(\n model_name='scaleanswer', name='quantitativeAnswer', field=models.\n CharField(blank=True, max_length=100, null=True, verbose_name=\n 'پاسخ کمی'))]\n",
"step-5": "# Generated by Django 2.0.5 on 2018-07-12 11:08\n\nimport assessment.models\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('assessment', '0006_auto_20180712_1428'),\n ]\n\n operations = [\n migrations.AlterModelManagers(\n name='season',\n managers=[\n ('objects', assessment.models.SeasonManager()),\n ],\n ),\n migrations.AlterField(\n model_name='punishmentreward',\n name='method',\n field=models.TextField(verbose_name='روش'),\n ),\n migrations.AlterField(\n model_name='scaleanswer',\n name='carried_on',\n field=models.BooleanField(default=False, verbose_name='انجام\\u200cشده'),\n ),\n migrations.AlterField(\n model_name='scaleanswer',\n name='qualitativeAnswer',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کیفی'),\n ),\n migrations.AlterField(\n model_name='scaleanswer',\n name='quantitativeAnswer',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کمی'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', admin.site.urls),
path('upload/', include('links.urls')),
]
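# For context (an assumption; links/urls.py is not shown in this snippet):
# the included module would define its own urlpatterns, for example
#   urlpatterns = [path('', views.upload_view, name='upload')]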
|
normal
|
{
"blob_id": "45e8bdacad4ed293f7267d96abc9cbe8c8e192ae",
"index": 4148,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', admin.site.urls), path('upload/', include(\n 'links.urls'))]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import include, path\nurlpatterns = [path('', admin.site.urls), path('upload/', include(\n 'links.urls'))]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path('', admin.site.urls),\n path('upload/', include('links.urls')),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if number <= 100:
bonus = 5
total_point = number + bonus
elif number > 1000:
bonus = 0.1 * number
total_point = number + bonus
else:
bonus = 0.2 * number
total_point = number + bonus
if number % 2 == 0:
bonus = bonus + 1
total_point = number + bonus
print(bonus)
print(total_point)
elif number % 10 == 5:
bonus = bonus + 2
total_point = number + bonus
print(bonus)
print(total_point)
<|reserved_special_token_1|>
number = int(input())
bonus = 0
if number <= 100:
bonus = 5
total_point = number + bonus
elif number > 1000:
bonus = 0.1 * number
total_point = number + bonus
else:
bonus = 0.2 * number
total_point = number + bonus
if number % 2 == 0:
bonus = bonus + 1
total_point = number + bonus
print(bonus)
print(total_point)
elif number % 10 == 5:
bonus = bonus + 2
total_point = number + bonus
print(bonus)
print(total_point)
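# Worked example (follows the branches above): for input 175, bonus = 0.2 * 175
# = 35.0; 175 ends in 5, so bonus becomes 37.0 and the program prints 37.0 and
# 212.0. Note that an odd number not ending in 5 prints nothing here.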
|
flexible
|
{
"blob_id": "7ee3301b55d323d156bd394f8525e37502d19430",
"index": 7669,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif number <= 100:\n bonus = 5\n total_point = number + bonus\nelif number > 1000:\n bonus = 0.1 * number\n total_point = number + bonus\nelse:\n bonus = 0.2 * number\n total_point = number + bonus\nif number % 2 == 0:\n bonus = bonus + 1\n total_point = number + bonus\n print(bonus)\n print(total_point)\nelif number % 10 == 5:\n bonus = bonus + 2\n total_point = number + bonus\n print(bonus)\n print(total_point)\n",
"step-3": "number = int(input())\nbonus = 0\nif number <= 100:\n bonus = 5\n total_point = number + bonus\nelif number > 1000:\n bonus = 0.1 * number\n total_point = number + bonus\nelse:\n bonus = 0.2 * number\n total_point = number + bonus\nif number % 2 == 0:\n bonus = bonus + 1\n total_point = number + bonus\n print(bonus)\n print(total_point)\nelif number % 10 == 5:\n bonus = bonus + 2\n total_point = number + bonus\n print(bonus)\n print(total_point)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(
'/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
<|reserved_special_token_0|>
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
q.join()
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(
'/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
<|reserved_special_token_0|>
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = 10, 350
fontScale = 1
fontColor = 255, 255, 255
lineType = 2
subtitles = Queue()
q = Queue()
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
q.join()
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
<|reserved_special_token_1|>
from concurrent import futures
import time
import math
import logging
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import sys
sys.path.append(
'/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
import cv2
from PRNet.utils.cv_plot import plot_kpt, plot_vertices
import pymesh
import threading
from Queue import Queue
from tensorflow.python.framework import tensor_util
import numpy as np
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = 10, 350
fontScale = 1
fontColor = 255, 255, 255
lineType = 2
subtitles = Queue()
q = Queue()
def worker():
display_subtitle = ''
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('frame', show_img)
if cv2.waitKey(1) & 255 == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if 'vertices' in request.inputs:
print('vertices')
vertices = tensor_util.MakeNdarray(request.inputs['vertices'])
q.put(vertices)
elif 'audio' in request.inputs:
print('audio')
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
stream.write(audio)
elif 'subtitle' in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
q.join()
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
<|reserved_special_token_1|>
from concurrent import futures
import time
import math
import logging
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import sys
sys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
import cv2
from PRNet.utils.cv_plot import plot_kpt, plot_vertices
import pymesh
import threading
from Queue import Queue
from tensorflow.python.framework import tensor_util
import numpy as np
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=22500,
output=True)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (10,350)
fontScale = 1
fontColor = (255,255,255)
lineType = 2
subtitles = Queue()
q = Queue()
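# (Editor's note) Rendering is confined to the single consumer thread below:
# OpenCV's imshow/waitKey are generally not safe to call from multiple
# threads, so the gRPC handler only enqueues frames and this worker draws.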
def worker():
display_subtitle = ""
while True:
item = q.get()
image = np.zeros((480, 640))
if item is not None:
vertices = item
show_img = plot_vertices(np.zeros_like(image), vertices)
else:
show_img = image
# Display the resulting frame
if not subtitles.empty():
text = subtitles.get()
subtitles.task_done()
display_subtitle = text
cv2.putText(show_img,display_subtitle,
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
cv2.imshow('frame',show_img)
# Press Q on keyboard to stop recording
if cv2.waitKey(1) & 0xFF == ord('q'):
break
q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
def Predict(self, request, context):
"""Predict -- provides access to loaded TensorFlow model.
"""
global q
global stream
if "vertices" in request.inputs:
print("vertices")
vertices = tensor_util.MakeNdarray(request.inputs["vertices"])
q.put(vertices)
elif "audio" in request.inputs:
print('audio')
# audio = tensor_util.MakeNdarray(request.inputs['audio'])
print(type(request.inputs['audio'].string_val[0]))
audio = request.inputs['audio'].string_val[0]
# print(request.inputs['audio'])
stream.write(audio)
elif "subtitle" in request.inputs:
print('subtitle')
subtitles.put(request.inputs['subtitle'].string_val[0])
dumbresult = predict_pb2.PredictResponse()
dumbresult.outputs["message"].CopyFrom(tf.make_tensor_proto("OK"))
return dumbresult
def serve():
t = threading.Thread(target=worker)
t.daemon = True
t.start()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
FakeServer(), server)
server.add_insecure_port('[::]:50051')
server.start()
# server.wait_for_termination()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
stream.stop_stream()
stream.close()
p.terminate()
    q.join() # block until all tasks are done
subtitles.join()
if __name__ == '__main__':
logging.basicConfig()
serve()
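# (Editor's addition, not in the original file) A minimal client sketch for
# the server above. It assumes the server is listening on localhost:50051;
# the 'subtitle' key mirrors the dispatch in FakeServer.Predict. The function
# is defined but never called, so the module's behaviour is unchanged.
def example_subtitle_client():
    channel = grpc.insecure_channel('localhost:50051')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    # make_tensor_proto('...') builds a string tensor; the server reads it
    # back via request.inputs['subtitle'].string_val[0]
    request.inputs['subtitle'].CopyFrom(tf.make_tensor_proto('Hello, world'))
    print(stub.Predict(request).outputs['message'])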
|
flexible
|
{
"blob_id": "0ec5d6ce11851a577046cf73cf98c91b6dfb9f67",
"index": 1550,
"step-1": "<mask token>\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n<mask token>\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-3": "<mask token>\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n<mask token>\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = 10, 350\nfontScale = 1\nfontColor = 255, 255, 255\nlineType = 2\nsubtitles = Queue()\nq = Queue()\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-4": "from concurrent import futures\nimport time\nimport math\nimport logging\nimport grpc\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\nimport sys\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\nimport cv2\nfrom PRNet.utils.cv_plot import plot_kpt, plot_vertices\nimport pymesh\nimport threading\nfrom Queue import Queue\nfrom tensorflow.python.framework import tensor_util\nimport numpy as np\nimport pyaudio\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = 10, 350\nfontScale = 1\nfontColor = 255, 255, 255\nlineType = 2\nsubtitles = Queue()\nq = Queue()\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-5": "from concurrent import futures\nimport time\nimport math\nimport logging\n\nimport grpc\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nimport sys\nsys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n\nimport cv2\nfrom PRNet.utils.cv_plot import plot_kpt, plot_vertices\nimport pymesh\nimport threading\nfrom Queue import Queue\nfrom tensorflow.python.framework import tensor_util\nimport numpy as np\n\n\nimport pyaudio\n\np = pyaudio.PyAudio()\n\nstream = p.open(format=pyaudio.paInt16,\n channels=1,\n rate=22500,\n output=True)\n\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = (10,350)\nfontScale = 1\nfontColor = (255,255,255)\nlineType = 2\n\n\nsubtitles = Queue()\n\nq = Queue()\ndef worker():\n display_subtitle = \"\"\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image \n # Display the resulting frame\n\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img,display_subtitle, \n bottomLeftCornerOfText, \n font, \n fontScale,\n fontColor,\n lineType)\n cv2.imshow('frame',show_img)\n\n\n # Press Q on keyboard to stop recording\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n q.task_done()\n\n\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if \"vertices\" in request.inputs:\n print(\"vertices\")\n vertices = tensor_util.MakeNdarray(request.inputs[\"vertices\"])\n q.put(vertices)\n elif \"audio\" in request.inputs:\n print('audio')\n # audio = tensor_util.MakeNdarray(request.inputs['audio'])\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n # print(request.inputs['audio'])\n stream.write(audio)\n elif \"subtitle\" in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n\n\n\n\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs[\"message\"].CopyFrom(tf.make_tensor_proto(\"OK\"))\n return dumbresult\n\n\n\ndef serve():\n\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n # server.wait_for_termination()\n\n\n\n\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n q.join() # block until all tasks are donet\n subtitles.join()\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with tf.Session() as sess:
predictions = []
labels = []
accuracies = []
for i in range(4):
sess.run(init)
choice = i + 1
choose_test_set(str(choice))
train_data, train_labels = get_network_input(train_path)
test_data, test_labels = get_network_input(test_path)
fold_accuracy = 0
for i in range(len(test_data)):
nn_index = sess.run(pred, feed_dict={x_train: train_data,
x_test: test_data[i, :]})
predictions.append(np.argmax(train_labels[nn_index]))
labels.append(np.argmax(test_labels[i]))
if predictions[-1] == labels[-1]:
fold_accuracy += 1.0 / len(test_data)
accuracies.append(fold_accuracy)
overall_accuracy = np.mean(accuracies)
print('Average accuracy over 4 folds:', overall_accuracy)
confusion = tf.confusion_matrix(labels=labels, predictions=predictions)
print(confusion.eval())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
train_path = 'Data/NetworkTrain/'
test_path = 'Data/NetworkTest/'
x_train = tf.placeholder('float', [None, 200])
x_test = tf.placeholder('float', [200])
distance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),
reduction_indices=1)
pred = tf.argmin(distance, 0)
init = tf.global_variables_initializer()
with tf.Session() as sess:
predictions = []
labels = []
accuracies = []
for i in range(4):
sess.run(init)
choice = i + 1
choose_test_set(str(choice))
train_data, train_labels = get_network_input(train_path)
test_data, test_labels = get_network_input(test_path)
fold_accuracy = 0
for i in range(len(test_data)):
nn_index = sess.run(pred, feed_dict={x_train: train_data,
x_test: test_data[i, :]})
predictions.append(np.argmax(train_labels[nn_index]))
labels.append(np.argmax(test_labels[i]))
if predictions[-1] == labels[-1]:
fold_accuracy += 1.0 / len(test_data)
accuracies.append(fold_accuracy)
overall_accuracy = np.mean(accuracies)
print('Average accuracy over 4 folds:', overall_accuracy)
confusion = tf.confusion_matrix(labels=labels, predictions=predictions)
print(confusion.eval())
<|reserved_special_token_1|>
import tensorflow as tf
import numpy as np
from classifier_input_functions import choose_test_set, get_network_input
train_path = 'Data/NetworkTrain/'
test_path = 'Data/NetworkTest/'
x_train = tf.placeholder('float', [None, 200])
x_test = tf.placeholder('float', [200])
distance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),
reduction_indices=1)
pred = tf.argmin(distance, 0)
init = tf.global_variables_initializer()
with tf.Session() as sess:
predictions = []
labels = []
accuracies = []
for i in range(4):
sess.run(init)
choice = i + 1
choose_test_set(str(choice))
train_data, train_labels = get_network_input(train_path)
test_data, test_labels = get_network_input(test_path)
fold_accuracy = 0
for i in range(len(test_data)):
nn_index = sess.run(pred, feed_dict={x_train: train_data,
x_test: test_data[i, :]})
predictions.append(np.argmax(train_labels[nn_index]))
labels.append(np.argmax(test_labels[i]))
if predictions[-1] == labels[-1]:
fold_accuracy += 1.0 / len(test_data)
accuracies.append(fold_accuracy)
overall_accuracy = np.mean(accuracies)
print('Average accuracy over 4 folds:', overall_accuracy)
confusion = tf.confusion_matrix(labels=labels, predictions=predictions)
print(confusion.eval())
<|reserved_special_token_1|>
################################################################################
# run_experiment.py #
# Ian Marci 2017 #
# Defines knn classifier and runs 4-fold cross validation on data in #
# Data/NetworkInput folder. #
# Prints accuracy for each fold as well as confusion matrix. #
################################################################################
# Imports
import tensorflow as tf
import numpy as np
from classifier_input_functions import choose_test_set, get_network_input
# Path and placeholder definitions
train_path = 'Data/NetworkTrain/'
test_path = 'Data/NetworkTest/'
x_train = tf.placeholder('float', [None, 200])
x_test = tf.placeholder('float', [200])
# Distance to decide nearest neighbor
distance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),
reduction_indices=1)
# Prediction chooses lowest distance
pred = tf.argmin(distance, 0)
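# (Editor's note) Worked example: with training rows [1, 2] and [4, 6] and a
# test point [1, 3], the L1 distances are |1-1|+|2-3| = 1 and |4-1|+|6-3| = 6,
# so tf.argmin picks index 0 -- the first training row is the nearest neighbour.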
################################
# 4-fold cross validation loop #
################################
init = tf.global_variables_initializer()
with tf.Session() as sess:
predictions = []
labels = []
accuracies = []
for i in range(4):
sess.run(init)
choice = i + 1
choose_test_set(str(choice))
train_data, train_labels = get_network_input(train_path)
test_data, test_labels = get_network_input(test_path)
fold_accuracy = 0
for i in range(len(test_data)):
nn_index = sess.run(pred, feed_dict={x_train: train_data,
x_test: test_data[i, :]})
predictions.append(np.argmax(train_labels[nn_index]))
labels.append(np.argmax(test_labels[i]))
if predictions[-1] == labels[-1]:
fold_accuracy += 1./len(test_data)
accuracies.append(fold_accuracy)
overall_accuracy = np.mean(accuracies)
print('Average accuracy over 4 folds:', overall_accuracy)
confusion = tf.confusion_matrix(labels=labels, predictions=predictions)
print(confusion.eval())
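# (Editor's note) The helpers imported from classifier_input_functions are not
# shown in this file. Inferred from the call sites above -- these signatures
# are an assumption, not the original code -- they behave roughly like:
#   choose_test_set(fold)     # fold is '1'..'4'; selects that fold's files
#                             # for Data/NetworkTest/, the rest for training
#   get_network_input(path)   # returns (data, labels) with data shaped
#                             # (N, 200) and labels one-hot encoded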
|
flexible
|
{
"blob_id": "dbc599a03d91f369d862f6cc90c31221747ead80",
"index": 2811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n choice = i + 1\n choose_test_set(str(choice))\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n fold_accuracy = 0\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1.0 / len(test_data)\n accuracies.append(fold_accuracy)\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-3": "<mask token>\ntrain_path = 'Data/NetworkTrain/'\ntest_path = 'Data/NetworkTest/'\nx_train = tf.placeholder('float', [None, 200])\nx_test = tf.placeholder('float', [200])\ndistance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),\n reduction_indices=1)\npred = tf.argmin(distance, 0)\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n choice = i + 1\n choose_test_set(str(choice))\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n fold_accuracy = 0\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1.0 / len(test_data)\n accuracies.append(fold_accuracy)\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nfrom classifier_input_functions import choose_test_set, get_network_input\ntrain_path = 'Data/NetworkTrain/'\ntest_path = 'Data/NetworkTest/'\nx_train = tf.placeholder('float', [None, 200])\nx_test = tf.placeholder('float', [200])\ndistance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),\n reduction_indices=1)\npred = tf.argmin(distance, 0)\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n choice = i + 1\n choose_test_set(str(choice))\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n fold_accuracy = 0\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1.0 / len(test_data)\n accuracies.append(fold_accuracy)\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-5": "################################################################################\n# run_experiment.py #\n# Ian Marci 2017 #\n# Defines knn classifier and runs 4-fold cross validation on data in #\n# Data/NetworkInput folder. #\n# Prints accuracy for each fold as well as confusion matrix. #\n################################################################################\n\n# Imports\nimport tensorflow as tf\nimport numpy as np\nfrom classifier_input_functions import choose_test_set, get_network_input\n\n# Path and placeholder definitions\ntrain_path = 'Data/NetworkTrain/'\ntest_path = 'Data/NetworkTest/'\n\nx_train = tf.placeholder('float', [None, 200])\nx_test = tf.placeholder('float', [200])\n\n# Distance to decide nearest neighbor\ndistance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),\n reduction_indices=1)\n# Prediction chooses lowest distance\npred = tf.argmin(distance, 0)\n\n################################\n# 4-fold cross validation loop #\n################################\n\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n\n choice = i + 1\n choose_test_set(str(choice))\n\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n\n fold_accuracy = 0\n\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1./len(test_data)\n accuracies.append(fold_accuracy)\n\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_code(string):
"""
string should be a comma-separated string.
"""
code = defaultdict(int)
for i, x in enumerate(string.split(',')):
code[i] = int(x)
return code
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
<|reserved_special_token_1|>
from collections import defaultdict
def read_code(string):
"""
string should be a comma-separated string.
"""
code = defaultdict(int)
for i, x in enumerate(string.split(',')):
code[i] = int(x)
return code
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
<|reserved_special_token_1|>
# helper functions to handle intcode
from collections import defaultdict
def read_code(string):
"""
string should be a comma-separated string.
"""
code = defaultdict(int)
for i, x in enumerate(string.split(',')):
code[i] = int(x)
return code
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
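# (Editor's note) For example, to_ascii('NOT A J') returns
# [78, 79, 84, 32, 65, 32, 74, 10]; such lists can be passed directly as the
# 'inputs' argument of IntCode.run() for ASCII-driven programs (opcode 3).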
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
# instruction pointer
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
# position mode
return self.code[value]
elif mode == 1:
# immediate mode
return value
elif mode == 2:
# relative mode
return self.code[value + self.base]
def get_values(self, modes):
return [
self.get_value(mode, self.code[self.idx + i])
for i, mode in enumerate(modes, start=1)
]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
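    # (Editor's note) Example: get_modes(1002, 3) returns [0, 1, 0] -- the
    # first and third parameters are in position mode, the second immediate.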
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
# position mode
self.code[param] = value
elif mode == 1:
# cannot be in immediate mode
raise ValueError
elif mode == 2:
# relative mode
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
# parse the value
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
# Day 2
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx+3], values[0] + values[1])
self.idx += 4
elif opcode == 2:
# Day 2
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx+3], values[0] * values[1])
self.idx += 4
elif opcode == 3:
# Day 5
if inputs is None or input_idx >= len(inputs):
# halt if we are expecting an input, resume later
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx+1], input_val)
self.idx += 2
elif opcode == 4:
# Day 5
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx+1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
# Day 5
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
# Day 5
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
# Day 5
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx+3], compare_val)
self.idx += 4
elif opcode == 8:
# Day 5
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx+3], compare_val)
self.idx += 4
elif opcode == 9:
# Day 9
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
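# (Editor's addition, not in the original file) A guarded smoke test; opcode
# 104 outputs its immediate parameter, so this prints [1125899906842624].
if __name__ == '__main__':
    demo = IntCode.load_code('104,1125899906842624,99')
    print(demo.run())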
|
flexible
|
{
"blob_id": "68c2fd1d8ca9e1dd9373ca9f641c2920c87b2392",
"index": 1346,
"step-1": "<mask token>\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if opcode == 1:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-2": "<mask token>\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if opcode == 1:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-3": "<mask token>\n\n\ndef read_code(string):\n \"\"\"\n string should be a comma-separated string.\n \"\"\"\n code = defaultdict(int)\n for i, x in enumerate(string.split(',')):\n code[i] = int(x)\n return code\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if opcode == 1:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n modes = 
self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-4": "from collections import defaultdict\n\n\ndef read_code(string):\n \"\"\"\n string should be a comma-separated string.\n \"\"\"\n code = defaultdict(int)\n for i, x in enumerate(string.split(',')):\n code[i] = int(x)\n return code\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n\n def __init__(self, code):\n self.code = code\n self.base = 0\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n return self.code[value]\n elif mode == 1:\n return value\n elif mode == 2:\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [self.get_value(mode, self.code[self.idx + i]) for i, mode in\n enumerate(modes, start=1)]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n self.code[param] = value\n elif mode == 1:\n raise ValueError\n elif mode == 2:\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n while True:\n value = self.code[self.idx]\n opcode = value % 100\n if opcode == 1:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] +\n values[1])\n self.idx += 4\n elif opcode == 2:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx + 3], values[0] *\n values[1])\n self.idx += 4\n elif opcode == 3:\n if inputs is None or input_idx >= len(inputs):\n return outputs\n input_val = inputs[input_idx]\n input_idx += 1\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx + 1], input_val)\n self.idx += 2\n elif opcode == 4:\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx + 1])\n outputs.append(v)\n if print_outputs:\n print(v)\n self.idx += 2\n elif opcode == 5:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 6:\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n elif opcode == 7:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 8:\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx + 3], compare_val)\n self.idx += 4\n elif opcode == 9:\n 
modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n self.idx += 2\n elif opcode == 99:\n self.terminated = True\n return outputs\n else:\n raise ValueError\n",
"step-5": "# helper functions to handle intcode\n\nfrom collections import defaultdict\n\ndef read_code(string):\n \"\"\"\n string should be a comma-separated string.\n \"\"\"\n\n code = defaultdict(int)\n for i, x in enumerate(string.split(',')):\n code[i] = int(x)\n return code\n\n\ndef to_ascii(line):\n \"\"\"\n Writes a string as ASCII code. Appends a newline at the end.\n \"\"\"\n data = [ord(c) for c in line]\n data.append(10)\n return data\n\n\nclass IntCode:\n def __init__(self, code):\n self.code = code\n self.base = 0\n\n # instruction pointer\n self.idx = 0\n self.terminated = False\n\n @staticmethod\n def load_code(code_string):\n return IntCode(read_code(code_string))\n\n @staticmethod\n def load_from_file(filename):\n return IntCode.load_code(open(filename, 'r').read())\n\n def copy(self):\n \"\"\"\n Returns a fresh copy of the code, **in the same state**.\n \"\"\"\n return IntCode(self.code.copy())\n\n def get_value(self, mode, value):\n if mode == 0:\n # position mode\n return self.code[value]\n elif mode == 1:\n # immediate mode\n return value\n elif mode == 2:\n # relative mode\n return self.code[value + self.base]\n\n def get_values(self, modes):\n return [\n self.get_value(mode, self.code[self.idx + i])\n for i, mode in enumerate(modes, start=1)\n ]\n\n def get_modes(self, value, n_modes):\n value = value // 100\n\n modes = []\n for _ in range(n_modes):\n modes.append(int(value % 10))\n value //= 10\n \n return modes\n\n def write_to(self, mode, param, value):\n \"\"\"\n write value to the location given by param, based on the mode.\n \"\"\"\n if mode == 0:\n # position mode\n self.code[param] = value\n elif mode == 1:\n # cannot be in immediate mode\n raise ValueError\n elif mode == 2:\n # relative mode\n self.code[param + self.base] = value\n\n def run(self, inputs=None, print_outputs=False):\n \"\"\"\n Resumes the code from the current instruction, using the\n given 'inputs' for any required inputs.\n\n When it halts, the outputs from this run are returned.\n\n If the program has terminated, the 'terminated' flag is set.\n \"\"\"\n input_idx = 0\n outputs = []\n\n while True:\n # parse the value\n value = self.code[self.idx]\n opcode = value % 100\n\n if opcode == 1:\n # Day 2\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx+3], values[0] + values[1])\n\n self.idx += 4\n\n elif opcode == 2:\n # Day 2\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n self.write_to(modes[2], self.code[self.idx+3], values[0] * values[1])\n\n self.idx += 4\n\n elif opcode == 3:\n # Day 5\n if inputs is None or input_idx >= len(inputs):\n # halt if we are expecting an input, resume later\n return outputs\n\n input_val = inputs[input_idx]\n input_idx += 1\n\n modes = self.get_modes(value, 1)\n self.write_to(modes[0], self.code[self.idx+1], input_val)\n\n self.idx += 2\n\n elif opcode == 4:\n # Day 5\n modes = self.get_modes(value, 1)\n v = self.get_value(modes[0], self.code[self.idx+1])\n outputs.append(v)\n if print_outputs:\n print(v)\n\n self.idx += 2\n\n elif opcode == 5:\n # Day 5\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] != 0:\n self.idx = values[1]\n else:\n self.idx += 3\n\n elif opcode == 6:\n # Day 5\n modes = self.get_modes(value, 2)\n values = self.get_values(modes)\n if values[0] == 0:\n self.idx = values[1]\n else:\n self.idx += 3\n\n elif opcode == 7:\n # Day 5\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n\n compare_val = 1 
if values[0] < values[1] else 0\n self.write_to(modes[2], self.code[self.idx+3], compare_val)\n\n self.idx += 4\n\n elif opcode == 8:\n # Day 5\n modes = self.get_modes(value, 3)\n values = self.get_values(modes)\n\n compare_val = 1 if values[0] == values[1] else 0\n self.write_to(modes[2], self.code[self.idx+3], compare_val)\n\n self.idx += 4\n\n elif opcode == 9:\n # Day 9\n modes = self.get_modes(value, 1)\n values = self.get_values(modes)\n self.base += values[0]\n\n self.idx += 2\n\n elif opcode == 99:\n self.terminated = True\n return outputs\n\n else:\n raise ValueError",
"step-ids": [
10,
11,
12,
13,
14
]
}
|
[
10,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Sport)
admin.site.register(Action)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Sport
from .models import Action
admin.site.register(Sport)
admin.site.register(Action)
|
flexible
|
{
"blob_id": "ab38371ee3941e214344497b7e56786908a9b3d1",
"index": 2236,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Sport)\nadmin.site.register(Action)\n",
"step-3": "from django.contrib import admin\nfrom .models import Sport\nfrom .models import Action\nadmin.site.register(Sport)\nadmin.site.register(Action)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: UTF-8 -*-
from flask import Blueprint, jsonify, request, abort, current_app
import json
from config import config_orm_initial
orm = config_orm_initial.initialize_orm()
session = orm['dict_session']
Article_list = orm['dict_Articlelist']
user = orm['dict_user']
app = Blueprint('api_get_comments', __name__)
@app.route('/comments/<article_id>', methods = ['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
    # try to parse the parameter sent from the frontend into a list
try:
temp_list = json.loads(comments_range)
        # check that the parameter is a list with exactly 2 elements
if isinstance(temp_list, list) and len(temp_list) == 2:
            # first look up the corresponding article
target_article = session.query(Article_list).filter_by(id = article_id).one_or_none()
            # if the article exists
if target_article:
                # then use the one-to-many relation to fetch the article's comments and their total count
target_comments = target_article.relate_comments
                # the result behaves like a list, so slice it in reverse order
comments_in_range = target_comments[-1-temp_list[0] : -1-temp_list[1]: -1]
comments_count = len(target_comments)
comments_list = list(map(
lambda x:{
'comment':x.content,
'time':x.time,
'user_name':session.query(user).filter_by(id=x.user_id).one().nickname,
'user_avatar':session.query(user).filter_by(id=x.user_id).one().avatar
},
comments_in_range)
)
resp = {'status': 200, 'result': {'count': comments_count, 'commentsList': comments_list}}
session.close()
return jsonify(resp)
            # if the article cannot be found
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
|
normal
|
{
"blob_id": "016c004fd95d901a6d55b6f7460397223a6baa3b",
"index": 1881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/comments/<article_id>', methods=['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n try:\n temp_list = json.loads(comments_range)\n if isinstance(temp_list, list) and len(temp_list) == 2:\n target_article = session.query(Article_list).filter_by(id=\n article_id).one_or_none()\n if target_article:\n target_comments = target_article.relate_comments\n comments_in_range = target_comments[-1 - temp_list[0]:-1 -\n temp_list[1]:-1]\n comments_count = len(target_comments)\n comments_list = list(map(lambda x: {'comment': x.content,\n 'time': x.time, 'user_name': session.query(user).\n filter_by(id=x.user_id).one().nickname, 'user_avatar':\n session.query(user).filter_by(id=x.user_id).one().\n avatar}, comments_in_range))\n resp = {'status': 200, 'result': {'count': comments_count,\n 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)\n",
"step-3": "<mask token>\norm = config_orm_initial.initialize_orm()\nsession = orm['dict_session']\nArticle_list = orm['dict_Articlelist']\nuser = orm['dict_user']\napp = Blueprint('api_get_comments', __name__)\n\n\[email protected]('/comments/<article_id>', methods=['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n try:\n temp_list = json.loads(comments_range)\n if isinstance(temp_list, list) and len(temp_list) == 2:\n target_article = session.query(Article_list).filter_by(id=\n article_id).one_or_none()\n if target_article:\n target_comments = target_article.relate_comments\n comments_in_range = target_comments[-1 - temp_list[0]:-1 -\n temp_list[1]:-1]\n comments_count = len(target_comments)\n comments_list = list(map(lambda x: {'comment': x.content,\n 'time': x.time, 'user_name': session.query(user).\n filter_by(id=x.user_id).one().nickname, 'user_avatar':\n session.query(user).filter_by(id=x.user_id).one().\n avatar}, comments_in_range))\n resp = {'status': 200, 'result': {'count': comments_count,\n 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)\n",
"step-4": "from flask import Blueprint, jsonify, request, abort, current_app\nimport json\nfrom config import config_orm_initial\norm = config_orm_initial.initialize_orm()\nsession = orm['dict_session']\nArticle_list = orm['dict_Articlelist']\nuser = orm['dict_user']\napp = Blueprint('api_get_comments', __name__)\n\n\[email protected]('/comments/<article_id>', methods=['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n try:\n temp_list = json.loads(comments_range)\n if isinstance(temp_list, list) and len(temp_list) == 2:\n target_article = session.query(Article_list).filter_by(id=\n article_id).one_or_none()\n if target_article:\n target_comments = target_article.relate_comments\n comments_in_range = target_comments[-1 - temp_list[0]:-1 -\n temp_list[1]:-1]\n comments_count = len(target_comments)\n comments_list = list(map(lambda x: {'comment': x.content,\n 'time': x.time, 'user_name': session.query(user).\n filter_by(id=x.user_id).one().nickname, 'user_avatar':\n session.query(user).filter_by(id=x.user_id).one().\n avatar}, comments_in_range))\n resp = {'status': 200, 'result': {'count': comments_count,\n 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)\n",
"step-5": "# -*- coding: UTF-8 -*-\nfrom flask import Blueprint, jsonify, request, abort, current_app\nimport json\nfrom config import config_orm_initial\n\norm = config_orm_initial.initialize_orm()\nsession = orm['dict_session']\nArticle_list = orm['dict_Articlelist']\nuser = orm['dict_user']\n\napp = Blueprint('api_get_comments', __name__)\n\[email protected]('/comments/<article_id>', methods = ['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n # 尝试把前端传来的参数解析成list\n try:\n temp_list = json.loads(comments_range)\n\n # 判断参数是否是list,并且只有2个元素\n if isinstance(temp_list, list) and len(temp_list) == 2:\n # 先找到对应的article\n target_article = session.query(Article_list).filter_by(id = article_id).one_or_none()\n # 如果能找到这篇文章\n if target_article:\n # 然后调用一对多方法,拿到这篇article对应的comments和comments总数\n target_comments = target_article.relate_comments\n # 拿到的结果和list差不多,所以取倒数排序\n comments_in_range = target_comments[-1-temp_list[0] : -1-temp_list[1]: -1]\n comments_count = len(target_comments)\n comments_list = list(map(\n lambda x:{\n 'comment':x.content, \n 'time':x.time, \n 'user_name':session.query(user).filter_by(id=x.user_id).one().nickname,\n 'user_avatar':session.query(user).filter_by(id=x.user_id).one().avatar\n },\n comments_in_range)\n )\n resp = {'status': 200, 'result': {'count': comments_count, 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n # 如果不能找到这篇文章\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train', help=
'could be either infer or train')
parser.add_argument('--model_dir', type=str, default='model', help=
'directory to save models')
parser.add_argument('--batch_size', type=int, default='20', help=
'train batch size')
parser.add_argument('--epoch', type=int, default='10', help=
'train epoch num')
parser.add_argument('--nd', type=int, default='100', help='noise dimension'
)
parser.add_argument('--num', type=int, default='1', help=
'which number to infer')
args = parser.parse_args()
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
infer(args)
else:
print('unknown mode')
<|reserved_special_token_1|>
import argparse
from train import train
from test import infer
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train', help=
'could be either infer or train')
parser.add_argument('--model_dir', type=str, default='model', help=
'directory to save models')
parser.add_argument('--batch_size', type=int, default='20', help=
'train batch size')
parser.add_argument('--epoch', type=int, default='10', help=
'train epoch num')
parser.add_argument('--nd', type=int, default='100', help='noise dimension'
)
parser.add_argument('--num', type=int, default='1', help=
'which number to infer')
args = parser.parse_args()
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
infer(args)
else:
print('unknown mode')
<|reserved_special_token_1|>
import argparse
from train import train
from test import infer
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train',
help='could be either infer or train')
parser.add_argument('--model_dir', type=str, default='model',
help='directory to save models')
parser.add_argument('--batch_size', type=int, default='20',
help='train batch size')
parser.add_argument('--epoch', type=int, default='10',
help='train epoch num')
parser.add_argument('--nd', type=int, default='100',
help='noise dimension')
parser.add_argument('--num', type=int, default='1',
help='which number to infer')
args = parser.parse_args()
# if not os.path.exists(args.model_dir):
# os.mkdir(args.model_dir)
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
infer(args)
else:
print('unknown mode')
|
flexible
|
{
"blob_id": "f0fa85f240b74b003ade767ffe8642feacdfaa32",
"index": 5807,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train', help=\n 'could be either infer or train')\n parser.add_argument('--model_dir', type=str, default='model', help=\n 'directory to save models')\n parser.add_argument('--batch_size', type=int, default='20', help=\n 'train batch size')\n parser.add_argument('--epoch', type=int, default='10', help=\n 'train epoch num')\n parser.add_argument('--nd', type=int, default='100', help='noise dimension'\n )\n parser.add_argument('--num', type=int, default='1', help=\n 'which number to infer')\n args = parser.parse_args()\n if args.mode == 'train':\n train(args)\n elif args.mode == 'infer':\n infer(args)\n else:\n print('unknown mode')\n",
"step-3": "import argparse\nfrom train import train\nfrom test import infer\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train', help=\n 'could be either infer or train')\n parser.add_argument('--model_dir', type=str, default='model', help=\n 'directory to save models')\n parser.add_argument('--batch_size', type=int, default='20', help=\n 'train batch size')\n parser.add_argument('--epoch', type=int, default='10', help=\n 'train epoch num')\n parser.add_argument('--nd', type=int, default='100', help='noise dimension'\n )\n parser.add_argument('--num', type=int, default='1', help=\n 'which number to infer')\n args = parser.parse_args()\n if args.mode == 'train':\n train(args)\n elif args.mode == 'infer':\n infer(args)\n else:\n print('unknown mode')\n",
"step-4": "import argparse\nfrom train import train\nfrom test import infer\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train',\n help='could be either infer or train')\n parser.add_argument('--model_dir', type=str, default='model',\n help='directory to save models')\n parser.add_argument('--batch_size', type=int, default='20',\n help='train batch size')\n parser.add_argument('--epoch', type=int, default='10',\n help='train epoch num')\n parser.add_argument('--nd', type=int, default='100',\n help='noise dimension')\n parser.add_argument('--num', type=int, default='1',\n help='which number to infer')\n args = parser.parse_args()\n\n # if not os.path.exists(args.model_dir):\n # os.mkdir(args.model_dir)\n\n if args.mode == 'train':\n train(args)\n elif args.mode == 'infer':\n infer(args)\n else:\n print('unknown mode')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Agile Business Group sagl (<http://www.agilebg.com>)
# Author: Nicola Malcontenti <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class StockPicking(orm.Model):
_inherit = "stock.picking"
#def _get_invoice_vals(self, cr, uid, key, inv_type,
# journal_id, origin, context=None):
# invoice_vals = super(StockPicking, self)._get_invoice_vals(
# cr, uid, key, inv_type, journal_id, origin, context=context)
# if context.get('active_id'):
# picking_id = int(context['active_id'])
# partner_id = self.browse(cr, uid, picking_id, context=context).partner_id
# if partner_id:
# invoice_vals['address_shipping_id'] = partner_id.id
# return invoice_vals
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin, context=None):
if context is None:
context = {}
partner, currency_id, company_id, user_id = key
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
return {
'origin': origin and origin.picking_id.name or origin,
'date_invoice': context.get('date_inv', False),
'address_shipping_id': partner.id,
'user_id': user_id,
'partner_id': partner.id,
'account_id': account_id,
'payment_term': payment_term,
'type': inv_type,
'fiscal_position': partner.property_account_position.id,
'company_id': company_id,
'currency_id': currency_id,
'journal_id': journal_id,
}
|
normal
|
{
"blob_id": "b111d799b9e71cf36253c37f83dc0cdc8887a32e",
"index": 7404,
"step-1": "<mask token>\n\n\nclass StockPicking(orm.Model):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StockPicking(orm.Model):\n <mask token>\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,\n context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id, 'user_id': user_id,\n 'partner_id': partner.id, 'account_id': account_id,\n 'payment_term': payment_term, 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id, 'currency_id': currency_id,\n 'journal_id': journal_id}\n",
"step-3": "<mask token>\n\n\nclass StockPicking(orm.Model):\n _inherit = 'stock.picking'\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,\n context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id, 'user_id': user_id,\n 'partner_id': partner.id, 'account_id': account_id,\n 'payment_term': payment_term, 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id, 'currency_id': currency_id,\n 'journal_id': journal_id}\n",
"step-4": "from openerp.osv import orm\n\n\nclass StockPicking(orm.Model):\n _inherit = 'stock.picking'\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,\n context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id, 'user_id': user_id,\n 'partner_id': partner.id, 'account_id': account_id,\n 'payment_term': payment_term, 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id, 'currency_id': currency_id,\n 'journal_id': journal_id}\n",
"step-5": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2014 Agile Business Group sagl (<http://www.agilebg.com>)\n# Author: Nicola Malcontenti <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import orm\n\n\nclass StockPicking(orm.Model):\n _inherit = \"stock.picking\"\n\n #def _get_invoice_vals(self, cr, uid, key, inv_type,\n # journal_id, origin, context=None):\n # invoice_vals = super(StockPicking, self)._get_invoice_vals(\n # cr, uid, key, inv_type, journal_id, origin, context=context)\n # if context.get('active_id'):\n # picking_id = int(context['active_id'])\n # partner_id = self.browse(cr, uid, picking_id, context=context).partner_id\n # if partner_id:\n # invoice_vals['address_shipping_id'] = partner_id.id\n # return invoice_vals\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin, context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {\n 'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id,\n 'user_id': user_id,\n 'partner_id': partner.id,\n 'account_id': account_id,\n 'payment_term': payment_term,\n 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id,\n 'currency_id': currency_id,\n 'journal_id': journal_id,\n }\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import cx_Oracle
import datetime
SDATE = '01.01.2014'
FDATE = '01.01.2020'
#p.PRESZAB,
#GDR_RATE.RFLUID,
#p.NRES
#join GDR_RATE on GDR_RATE.IDWELL = p.IDWELL and GDR_RATE.DTBGN = p.DTBGN and GDR_RATE.NRES = p.NRES)
pbu_query_raw = f"""
select
WELLNAME,
DTBGN,
DPDEVICE,
(TVDSS-(MD - DPDEVICE)*(cos(INKL/57.2958))) as TVDDEVICE
from(
select
p.IDWELL as IDWELL,
BASP_REGISTRYWELL.WELLNAME as WELLNAME,
p.DTBGN as DTBGN,
GDR_TEST.DPDEVICE as DPDEVICE,
itb.MD as MD,
itb.TVDSS as TVDSS,
itb.INKL as INKL,
itb.AZIM as AZIM,
row_number() over(partition by p.IDWELL, p.DTBGN order by abs(itb.MD-GDR_TEST.DPDEVICE) asc) as RN
from GDR_MSRPRESS p
join GDR_TEST on GDR_TEST.IDWELL = p.IDWELL and GDR_TEST.DTBGN = p.DTBGN and GDR_TEST.NRES = p.NRES
join BASP_REGISTRYWELL on BASP_REGISTRYWELL.IDWELL = p.IDWELL
join (select
RSRC_REGISTRYINKL.IDWELL as IDWELL,
i.DPTINKL as MD,
i.AGLINKL as INKL,
i.AZMINKL as AZIM,
i.AOINKL as TVDSS
from RSRC_INKL i
JOIN RSRC_REGISTRYINKL ON i.IDINKL = RSRC_REGISTRYINKL.IDINKL
order by RSRC_REGISTRYINKL.IDWELL, i.DPTINKL) itb
on itb.IDWELL=p.IDWELL and itb.MD > GDR_TEST.DPDEVICE
where p.DTBGN > TO_DATE('{SDATE}','DD.MM.YYYY')
order by p.DTBGN, p.IDWELL
)
where RN = 1
order by IDWELL, DTBGN
""" # PBU press
def get_data_from_database_cns(connection, query_string, delimiter = ';'):
with connection.cursor() as cur:
cur.execute(query_string)
[print(x[0], end=delimiter) for x in cur.description] # print table headers
print()
for result in cur:
#print(result)
for w in result:
if w == None:
print("",end = delimiter)
elif isinstance(w, datetime.datetime):
print(f"{w:%d.%m.%Y %H:%M:%S}",end = delimiter)
else:
print(f"{w}",end = delimiter)
print()
def connect_database():
host_name = '10.201.194.37'
port_number = 1521
service_name = 'WQ2'
user = 'WQ2_RO'
password = user
dsn_tns = cx_Oracle.makedsn(host_name, port_number, service_name)
return cx_Oracle.connect(user, password, dsn_tns)
def connect_and_query():
connection = connect_database() #print(connection.version)
get_data_from_database_cns(connection, pbu_query_raw,' ') #
connection.close()
connect_and_query()
|
normal
|
{
"blob_id": "39f1595374147c71bc2d4c945a0f1149891f1883",
"index": 5300,
"step-1": "<mask token>\n\n\ndef get_data_from_database_cns(connection, query_string, delimiter=';'):\n with connection.cursor() as cur:\n cur.execute(query_string)\n [print(x[0], end=delimiter) for x in cur.description]\n print()\n for result in cur:\n for w in result:\n if w == None:\n print('', end=delimiter)\n elif isinstance(w, datetime.datetime):\n print(f'{w:%d.%m.%Y %H:%M:%S}', end=delimiter)\n else:\n print(f'{w}', end=delimiter)\n print()\n\n\ndef connect_database():\n host_name = '10.201.194.37'\n port_number = 1521\n service_name = 'WQ2'\n user = 'WQ2_RO'\n password = user\n dsn_tns = cx_Oracle.makedsn(host_name, port_number, service_name)\n return cx_Oracle.connect(user, password, dsn_tns)\n\n\ndef connect_and_query():\n connection = connect_database()\n get_data_from_database_cns(connection, pbu_query_raw, ' ')\n connection.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data_from_database_cns(connection, query_string, delimiter=';'):\n with connection.cursor() as cur:\n cur.execute(query_string)\n [print(x[0], end=delimiter) for x in cur.description]\n print()\n for result in cur:\n for w in result:\n if w == None:\n print('', end=delimiter)\n elif isinstance(w, datetime.datetime):\n print(f'{w:%d.%m.%Y %H:%M:%S}', end=delimiter)\n else:\n print(f'{w}', end=delimiter)\n print()\n\n\ndef connect_database():\n host_name = '10.201.194.37'\n port_number = 1521\n service_name = 'WQ2'\n user = 'WQ2_RO'\n password = user\n dsn_tns = cx_Oracle.makedsn(host_name, port_number, service_name)\n return cx_Oracle.connect(user, password, dsn_tns)\n\n\ndef connect_and_query():\n connection = connect_database()\n get_data_from_database_cns(connection, pbu_query_raw, ' ')\n connection.close()\n\n\nconnect_and_query()\n",
"step-3": "<mask token>\nSDATE = '01.01.2014'\nFDATE = '01.01.2020'\npbu_query_raw = f\"\"\"\nselect \n WELLNAME,\n DTBGN,\n DPDEVICE,\n (TVDSS-(MD - DPDEVICE)*(cos(INKL/57.2958))) as TVDDEVICE\nfrom(\n select \n p.IDWELL as IDWELL, \n BASP_REGISTRYWELL.WELLNAME as WELLNAME,\n p.DTBGN as DTBGN, \n GDR_TEST.DPDEVICE as DPDEVICE,\n itb.MD as MD,\n itb.TVDSS as TVDSS,\n itb.INKL as INKL,\n itb.AZIM as AZIM,\n row_number() over(partition by p.IDWELL, p.DTBGN order by abs(itb.MD-GDR_TEST.DPDEVICE) asc) as RN\n from GDR_MSRPRESS p \n join GDR_TEST on GDR_TEST.IDWELL = p.IDWELL and GDR_TEST.DTBGN = p.DTBGN and GDR_TEST.NRES = p.NRES \n join BASP_REGISTRYWELL on BASP_REGISTRYWELL.IDWELL = p.IDWELL\n\n join (select \n RSRC_REGISTRYINKL.IDWELL as IDWELL,\n i.DPTINKL as MD, \n i.AGLINKL as INKL, \n i.AZMINKL as AZIM, \n i.AOINKL as TVDSS\n from RSRC_INKL i \n JOIN RSRC_REGISTRYINKL ON i.IDINKL = RSRC_REGISTRYINKL.IDINKL\n order by RSRC_REGISTRYINKL.IDWELL, i.DPTINKL) itb\n on itb.IDWELL=p.IDWELL and itb.MD > GDR_TEST.DPDEVICE\n where p.DTBGN > TO_DATE('{SDATE}','DD.MM.YYYY') \n order by p.DTBGN, p.IDWELL\n ) \n where RN = 1\n order by IDWELL, DTBGN\n\n \"\"\"\n\n\ndef get_data_from_database_cns(connection, query_string, delimiter=';'):\n with connection.cursor() as cur:\n cur.execute(query_string)\n [print(x[0], end=delimiter) for x in cur.description]\n print()\n for result in cur:\n for w in result:\n if w == None:\n print('', end=delimiter)\n elif isinstance(w, datetime.datetime):\n print(f'{w:%d.%m.%Y %H:%M:%S}', end=delimiter)\n else:\n print(f'{w}', end=delimiter)\n print()\n\n\ndef connect_database():\n host_name = '10.201.194.37'\n port_number = 1521\n service_name = 'WQ2'\n user = 'WQ2_RO'\n password = user\n dsn_tns = cx_Oracle.makedsn(host_name, port_number, service_name)\n return cx_Oracle.connect(user, password, dsn_tns)\n\n\ndef connect_and_query():\n connection = connect_database()\n get_data_from_database_cns(connection, pbu_query_raw, ' ')\n connection.close()\n\n\nconnect_and_query()\n",
"step-4": "import cx_Oracle\nimport datetime\nSDATE = '01.01.2014'\nFDATE = '01.01.2020'\npbu_query_raw = f\"\"\"\nselect \n WELLNAME,\n DTBGN,\n DPDEVICE,\n (TVDSS-(MD - DPDEVICE)*(cos(INKL/57.2958))) as TVDDEVICE\nfrom(\n select \n p.IDWELL as IDWELL, \n BASP_REGISTRYWELL.WELLNAME as WELLNAME,\n p.DTBGN as DTBGN, \n GDR_TEST.DPDEVICE as DPDEVICE,\n itb.MD as MD,\n itb.TVDSS as TVDSS,\n itb.INKL as INKL,\n itb.AZIM as AZIM,\n row_number() over(partition by p.IDWELL, p.DTBGN order by abs(itb.MD-GDR_TEST.DPDEVICE) asc) as RN\n from GDR_MSRPRESS p \n join GDR_TEST on GDR_TEST.IDWELL = p.IDWELL and GDR_TEST.DTBGN = p.DTBGN and GDR_TEST.NRES = p.NRES \n join BASP_REGISTRYWELL on BASP_REGISTRYWELL.IDWELL = p.IDWELL\n\n join (select \n RSRC_REGISTRYINKL.IDWELL as IDWELL,\n i.DPTINKL as MD, \n i.AGLINKL as INKL, \n i.AZMINKL as AZIM, \n i.AOINKL as TVDSS\n from RSRC_INKL i \n JOIN RSRC_REGISTRYINKL ON i.IDINKL = RSRC_REGISTRYINKL.IDINKL\n order by RSRC_REGISTRYINKL.IDWELL, i.DPTINKL) itb\n on itb.IDWELL=p.IDWELL and itb.MD > GDR_TEST.DPDEVICE\n where p.DTBGN > TO_DATE('{SDATE}','DD.MM.YYYY') \n order by p.DTBGN, p.IDWELL\n ) \n where RN = 1\n order by IDWELL, DTBGN\n\n \"\"\"\n\n\ndef get_data_from_database_cns(connection, query_string, delimiter=';'):\n with connection.cursor() as cur:\n cur.execute(query_string)\n [print(x[0], end=delimiter) for x in cur.description]\n print()\n for result in cur:\n for w in result:\n if w == None:\n print('', end=delimiter)\n elif isinstance(w, datetime.datetime):\n print(f'{w:%d.%m.%Y %H:%M:%S}', end=delimiter)\n else:\n print(f'{w}', end=delimiter)\n print()\n\n\ndef connect_database():\n host_name = '10.201.194.37'\n port_number = 1521\n service_name = 'WQ2'\n user = 'WQ2_RO'\n password = user\n dsn_tns = cx_Oracle.makedsn(host_name, port_number, service_name)\n return cx_Oracle.connect(user, password, dsn_tns)\n\n\ndef connect_and_query():\n connection = connect_database()\n get_data_from_database_cns(connection, pbu_query_raw, ' ')\n connection.close()\n\n\nconnect_and_query()\n",
"step-5": "import cx_Oracle\nimport datetime\n\nSDATE = '01.01.2014'\nFDATE = '01.01.2020'\n\n#p.PRESZAB, \n#GDR_RATE.RFLUID, \n#p.NRES \n#join GDR_RATE on GDR_RATE.IDWELL = p.IDWELL and GDR_RATE.DTBGN = p.DTBGN and GDR_RATE.NRES = p.NRES)\n\npbu_query_raw = f\"\"\"\nselect \n WELLNAME,\n DTBGN,\n DPDEVICE,\n (TVDSS-(MD - DPDEVICE)*(cos(INKL/57.2958))) as TVDDEVICE\nfrom(\n select \n p.IDWELL as IDWELL, \n BASP_REGISTRYWELL.WELLNAME as WELLNAME,\n p.DTBGN as DTBGN, \n GDR_TEST.DPDEVICE as DPDEVICE,\n itb.MD as MD,\n itb.TVDSS as TVDSS,\n itb.INKL as INKL,\n itb.AZIM as AZIM,\n row_number() over(partition by p.IDWELL, p.DTBGN order by abs(itb.MD-GDR_TEST.DPDEVICE) asc) as RN\n from GDR_MSRPRESS p \n join GDR_TEST on GDR_TEST.IDWELL = p.IDWELL and GDR_TEST.DTBGN = p.DTBGN and GDR_TEST.NRES = p.NRES \n join BASP_REGISTRYWELL on BASP_REGISTRYWELL.IDWELL = p.IDWELL\n\n join (select \n RSRC_REGISTRYINKL.IDWELL as IDWELL,\n i.DPTINKL as MD, \n i.AGLINKL as INKL, \n i.AZMINKL as AZIM, \n i.AOINKL as TVDSS\n from RSRC_INKL i \n JOIN RSRC_REGISTRYINKL ON i.IDINKL = RSRC_REGISTRYINKL.IDINKL\n order by RSRC_REGISTRYINKL.IDWELL, i.DPTINKL) itb\n on itb.IDWELL=p.IDWELL and itb.MD > GDR_TEST.DPDEVICE\n where p.DTBGN > TO_DATE('{SDATE}','DD.MM.YYYY') \n order by p.DTBGN, p.IDWELL\n ) \n where RN = 1\n order by IDWELL, DTBGN\n\n \"\"\" # PBU press\n\n\ndef get_data_from_database_cns(connection, query_string, delimiter = ';'):\n with connection.cursor() as cur:\n cur.execute(query_string)\n [print(x[0], end=delimiter) for x in cur.description] # print table headers\n print()\n for result in cur:\n #print(result)\n for w in result:\n if w == None:\n print(\"\",end = delimiter)\n elif isinstance(w, datetime.datetime):\n print(f\"{w:%d.%m.%Y %H:%M:%S}\",end = delimiter)\n else:\n print(f\"{w}\",end = delimiter)\n print()\n\ndef connect_database():\n host_name = '10.201.194.37'\n port_number = 1521\n service_name = 'WQ2'\n user = 'WQ2_RO'\n password = user\n dsn_tns = cx_Oracle.makedsn(host_name, port_number, service_name)\n return cx_Oracle.connect(user, password, dsn_tns)\n\ndef connect_and_query():\n connection = connect_database() #print(connection.version)\n get_data_from_database_cns(connection, pbu_query_raw,' ') # \n connection.close()\n\nconnect_and_query()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#os for file system
import os
from sys import platform as _platform
import fnmatch
import inspect
files = 0
lines = 0
extension0 = '.c'
extension1 = '.cpp'
extension2 = '.h'
extension3 = '.hpp'
filename = inspect.getframeinfo(inspect.currentframe()).filename
startPath = os.path.dirname(os.path.abspath(filename))
with open("files_with_extensions.txt", "w", encoding="utf-8") as filewrite:
for r, d, f in os.walk(startPath):
for file in f:
if file.endswith(extension0) or file.endswith(extension1) or file.endswith(extension2) or file.endswith(extension3):
if _platform == "linux" or _platform == "linux2":
ss = '/'
elif _platform == "win32" or _platform == "win64":
ss = '\\'
filePathAndName = r + ss + file
files += 1
filewrite.write(f"{filePathAndName}")
fi = open(filePathAndName, 'r')
pos = fi.tell()
fileLines = 0
while (True):
li = fi.readline()
# check for any hidden symbols
if li.isspace():
continue
newpos = fi.tell()
fileLines += 1
if newpos == pos: # stream position hasn't changed -> EOF
break
else:
pos = newpos
lines += fileLines
filewrite.write(f"{fileLines}\n")
print(file + " " + str(fileLines))
fi.close()
print(files)
print(lines)
filewrite.write(f"{files}\n")
filewrite.write(f"{lines}\n")
|
normal
|
{
"blob_id": "d287123acdbabdd5a223e774c89945ab888fcbcc",
"index": 5439,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-3": "<mask token>\nfiles = 0\nlines = 0\nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\nextension3 = '.hpp'\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-4": "import os\nfrom sys import platform as _platform\nimport fnmatch\nimport inspect\nfiles = 0\nlines = 0\nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\nextension3 = '.hpp'\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-5": "#os for file system\nimport os\n\nfrom sys import platform as _platform\n\nimport fnmatch\nimport inspect\n\nfiles = 0\nlines = 0 \n \nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\t\nextension3 = '.hpp'\t\n\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\n\nwith open(\"files_with_extensions.txt\", \"w\", encoding=\"utf-8\") as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1) or file.endswith(extension2) or file.endswith(extension3):\n\n if _platform == \"linux\" or _platform == \"linux2\":\n ss = '/'\n elif _platform == \"win32\" or _platform == \"win64\":\n ss = '\\\\'\n\n filePathAndName = r + ss + file\n\n files += 1\n\n filewrite.write(f\"{filePathAndName}\")\n \n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n\n fileLines = 0\n while (True):\n li = fi.readline()\n\n # check for any hidden symbols\n if li.isspace():\n continue\n \n newpos = fi.tell()\n fileLines += 1\n if newpos == pos: # stream position hasn't changed -> EOF\n break\n else:\n pos = newpos\n\n lines += fileLines\n\n filewrite.write(f\"{fileLines}\\n\")\n print(file + \" \" + str(fileLines))\n\n fi.close()\n \n\n print(files)\n print(lines)\n\n filewrite.write(f\"{files}\\n\")\n filewrite.write(f\"{lines}\\n\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def subsets(self, nums: List[int]) ->List[List[int]]:
"""
ans = set()
n = len(nums)
for x, val in enumerate(nums):
for y in range(x + 1, n + 1):
ans.add(frozenset(nums[x:y]))
for u in range(0, x + 1):
for z in range(y + 1, n + 1):
ans.add(frozenset([nums[u]] + nums[y:z + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))
ans.add(frozenset([nums[u]] + nums[z:n + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))
ans.add(frozenset([]))
return ans
"""
all_subsets = [[]]
if nums:
for num in nums:
for idx in range(len(all_subsets)):
all_subsets.append(all_subsets[idx] + [num])
return all_subsets
<|reserved_special_token_1|>
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
'''
ans = set()
n = len(nums)
for x, val in enumerate(nums):
for y in range(x + 1, n + 1):
ans.add(frozenset(nums[x:y]))
for u in range(0, x + 1):
for z in range(y + 1, n + 1):
ans.add(frozenset([nums[u]] + nums[y:z + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))
ans.add(frozenset([nums[u]] + nums[z:n + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))
ans.add(frozenset([]))
return ans
'''
all_subsets = [[]]
if nums:
for num in nums:
for idx in range(len(all_subsets)):
all_subsets.append(all_subsets[idx] + [num])
return all_subsets
|
flexible
|
{
"blob_id": "7d873ed216355d1688ec79ff337304d8ebfd2754",
"index": 7625,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def subsets(self, nums: List[int]) ->List[List[int]]:\n \"\"\"\n ans = set()\n n = len(nums)\n for x, val in enumerate(nums):\n for y in range(x + 1, n + 1):\n ans.add(frozenset(nums[x:y]))\n for u in range(0, x + 1):\n for z in range(y + 1, n + 1):\n \n ans.add(frozenset([nums[u]] + nums[y:z + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))\n\n ans.add(frozenset([nums[u]] + nums[z:n + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))\n \n ans.add(frozenset([]))\n return ans\n \"\"\"\n all_subsets = [[]]\n if nums:\n for num in nums:\n for idx in range(len(all_subsets)):\n all_subsets.append(all_subsets[idx] + [num])\n return all_subsets\n",
"step-4": "class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n '''\n ans = set()\n n = len(nums)\n for x, val in enumerate(nums):\n for y in range(x + 1, n + 1):\n ans.add(frozenset(nums[x:y]))\n for u in range(0, x + 1):\n for z in range(y + 1, n + 1):\n \n ans.add(frozenset([nums[u]] + nums[y:z + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))\n\n ans.add(frozenset([nums[u]] + nums[z:n + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))\n \n ans.add(frozenset([]))\n return ans\n '''\n \n all_subsets = [[]]\n \n if nums:\n \n for num in nums:\n for idx in range(len(all_subsets)):\n \n all_subsets.append(all_subsets[idx] + [num])\n \n \n return all_subsets\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Honda:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Audi:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print('Audi car name is : ', self.name, ' and color is : ', self.color)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Honda:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print('Honda car name is : ', self.name, ' and color is : ', self.color
)
class Audi:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print('Audi car name is : ', self.name, ' and color is : ', self.color)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Honda:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print('Honda car name is : ', self.name, ' and color is : ', self.color
)
class Audi:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print('Audi car name is : ', self.name, ' and color is : ', self.color)
<|reserved_special_token_0|>
for car in (HondaCar, AudiCar):
car.display()
<|reserved_special_token_1|>
class Honda:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print('Honda car name is : ', self.name, ' and color is : ', self.color
)
class Audi:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print('Audi car name is : ', self.name, ' and color is : ', self.color)
HondaCar = Honda('Honda City', 'White')
AudiCar = Audi('A6', 'Black')
for car in (HondaCar, AudiCar):
car.display()
<|reserved_special_token_1|>
# 5. Write a program to implement polymorphism.
class Honda:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print("Honda car name is : ", self.name, " and color is : ", self.color)
class Audi:
def __init__(self, name, color):
self.name = name
self.color = color
def display(self):
print("Audi car name is : ", self.name, " and color is : ", self.color)
HondaCar = Honda("Honda City", "White")
AudiCar = Audi("A6", "Black")
for car in (HondaCar, AudiCar):
car.display()
|
flexible
|
{
"blob_id": "92f59612b2697db155da1bdc625fdabc115867b0",
"index": 9600,
"step-1": "class Honda:\n <mask token>\n <mask token>\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\n<mask token>\n",
"step-2": "class Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Honda car name is : ', self.name, ' and color is : ', self.color\n )\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\n<mask token>\n",
"step-3": "class Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Honda car name is : ', self.name, ' and color is : ', self.color\n )\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\n<mask token>\nfor car in (HondaCar, AudiCar):\n car.display()\n",
"step-4": "class Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Honda car name is : ', self.name, ' and color is : ', self.color\n )\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\nHondaCar = Honda('Honda City', 'White')\nAudiCar = Audi('A6', 'Black')\nfor car in (HondaCar, AudiCar):\n car.display()\n",
"step-5": "# 5. Write a program to implement polymorphism.\n\nclass Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print(\"Honda car name is : \", self.name, \" and color is : \", self.color)\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print(\"Audi car name is : \", self.name, \" and color is : \", self.color)\n\n\nHondaCar = Honda(\"Honda City\", \"White\")\nAudiCar = Audi(\"A6\", \"Black\")\n\nfor car in (HondaCar, AudiCar):\n car.display()\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from django.contrib import admin
from .models import (AddressLink, Address, Child, Citation,
Configuration, Event, Exclusion, FactType,
Family, Group, Label, LinkAncestry,
Link, MediaLink, Multimedia, Name,
Person, Place, ResearchItem, Research,
Role, Source, SourceTemplate, Url,
Witness)
from . import EXODUS_DB_NAME
from .utils.admin import MultiDBModelAdmin
from .utils.rootsmagic import read_and_pprint_date
class RootsMagicModelAdmin(MultiDBModelAdmin):
using = EXODUS_DB_NAME
class AddressLinkAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"address",
"owner_id",
"address_number",
"details",
]
class AddressAdmin(RootsMagicModelAdmin):
pass
class ChildAdmin(RootsMagicModelAdmin):
list_display = [
"record_id",
"child",
"family",
"father_relationship",
"mother_relationship",
"child_order",
"is_private",
"father_proof",
"mother_proof",
"note",
]
raw_id_fields = [
'child',
'family',
]
class CitationAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"source_id",
"owner_id",
"quality",
"is_private",
"comments",
"actual_text",
"reference_number",
"flags",
# "fields",
]
class ConfigurationAdmin(RootsMagicModelAdmin):
pass
class EventAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"event_type",
"owner",
"owner_type",
"owner_id",
"family",
"place",
"site",
# "date",
"pretty_date",
"sort_date",
"is_primary",
"is_private",
"proof",
"status",
"edit_date",
"sentence",
# "details",
# "note",
]
def pretty_date(self, obj):
return read_and_pprint_date(obj.date)
pretty_date.short_description = "Date"
class ExclusionAdmin(RootsMagicModelAdmin):
pass
class FactTypeAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"name",
"abbreviation",
"gedcom_tag",
"use_value",
"use_date",
"use_place",
"sentence",
"flags",
]
class FamilyAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"father",
"mother",
"child",
"husband_order",
"wife_order",
"is_private",
"proof",
"spouse_label",
"father_label",
"mother_label",
# "note",
]
class GroupAdmin(RootsMagicModelAdmin):
pass
class LabelAdmin(RootsMagicModelAdmin):
pass
class LinkAncestryAdmin(RootsMagicModelAdmin):
pass
class LinkAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"ext_system",
"link_type",
"rootsmagic",
"ext_id",
"modified",
"ext_version",
"ext_date",
"status",
"note",
]
class MediaLinkAdmin(RootsMagicModelAdmin):
list_display = [
"link_id",
"media",
"owner",
"owner_type",
"owner_id",
"is_primary",
"include_1",
"include_2",
"include_3",
"include_4",
"sort_order",
"rectangle_left",
"rectangle_top",
"rectangle_right",
"rectangle_bottom",
"note",
"caption",
"reference_number",
"date",
"sort_date",
# "description",
]
class MultimediaAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"media_type",
"media_path",
"media_file",
"url",
"thumbnail",
"caption",
"reference_number",
# "date",
"pretty_date",
"sort_date",
# "description",
]
def pretty_date(self, obj):
return read_and_pprint_date(obj.date)
pretty_date.short_description = "Date"
class NameAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner",
"surname",
"given",
"prefix",
"suffix",
"nickname",
"name_type",
"date",
"sort_date",
"is_primary",
"is_private",
"proof",
"edit_date",
"sentence",
# "note",
"birth_year",
"death_year",
]
class PersonAdmin(RootsMagicModelAdmin):
list_display = [
"id",
'primary_name',
"sex_short",
"edit_date",
"parent",
"spouse",
"color",
"relate_1",
"relate_2",
"flags",
"is_living",
"is_private",
"proof",
"unique_id",
"bookmark",
# "note",
]
class PlaceAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"place_type",
"name",
"abbreviation",
"normalized",
"master_place",
# "latitude",
# "longitude",
"pretty_latlong",
"exact_latituate_longitude",
"note",
]
raw_id_fields = [
"master_place"
]
readonly_fields = [
"pretty_latlong"
]
class ResearchItemAdmin(RootsMagicModelAdmin):
pass
class ResearchAdmin(RootsMagicModelAdmin):
pass
class RoleAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"role_name",
"event_type",
"role_type",
"sentence",
]
class SourceAdmin(RootsMagicModelAdmin):
raw_id_fields = ['template']
class SourceTemplateAdmin(RootsMagicModelAdmin):
pass
class UrlAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"owner_id",
"link_type",
"name",
"url",
"note",
]
class WitnessAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"event",
"person",
"witness_order",
"role",
"sentence",
"note",
"given",
"surname",
"prefix",
"suffix",
]
admin.site.register(AddressLink, AddressLinkAdmin)
admin.site.register(Address, AddressAdmin)
admin.site.register(Child, ChildAdmin)
admin.site.register(Citation, CitationAdmin)
admin.site.register(Configuration, ConfigurationAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Exclusion, ExclusionAdmin)
admin.site.register(FactType, FactTypeAdmin)
admin.site.register(Family, FamilyAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Label, LabelAdmin)
admin.site.register(LinkAncestry, LinkAncestryAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(MediaLink, MediaLinkAdmin)
admin.site.register(Multimedia, MultimediaAdmin)
admin.site.register(Name, NameAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Place, PlaceAdmin)
admin.site.register(ResearchItem, ResearchItemAdmin)
admin.site.register(Research, ResearchAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(SourceTemplate, SourceTemplateAdmin)
admin.site.register(Url, UrlAdmin)
admin.site.register(Witness, WitnessAdmin)
|
normal
|
{
"blob_id": "b4d48427dddc7c0240cf05c003cbf7b0163279ee",
"index": 9729,
"step-1": "<mask token>\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event_type', 'owner', 'owner_type', 'owner_id',\n 'family', 'place', 'site', 'pretty_date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'status', 'edit_date', 'sentence']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CitationAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'source_id', 'owner_id', 'quality',\n 'is_private', 'comments', 'actual_text', 'reference_number', 'flags']\n\n\nclass ConfigurationAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event_type', 'owner', 'owner_type', 'owner_id',\n 'family', 'place', 'site', 'pretty_date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'status', 'edit_date', 'sentence']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ChildAdmin(RootsMagicModelAdmin):\n <mask token>\n <mask token>\n\n\nclass CitationAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'source_id', 'owner_id', 'quality',\n 'is_private', 'comments', 'actual_text', 'reference_number', 'flags']\n\n\nclass ConfigurationAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event_type', 'owner', 'owner_type', 'owner_id',\n 'family', 'place', 'site', 'pretty_date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'status', 'edit_date', 'sentence']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 
'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-5": "from django.contrib import admin\n\nfrom .models import (AddressLink, Address, Child, Citation,\n Configuration, Event, Exclusion, FactType,\n Family, Group, Label, LinkAncestry,\n Link, MediaLink, Multimedia, Name,\n Person, Place, ResearchItem, Research,\n Role, Source, SourceTemplate, Url,\n Witness)\n\nfrom . import EXODUS_DB_NAME\nfrom .utils.admin import MultiDBModelAdmin\nfrom .utils.rootsmagic import read_and_pprint_date\n\n\nclass RootsMagicModelAdmin(MultiDBModelAdmin):\n using = EXODUS_DB_NAME\n\n\nclass AddressLinkAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"address\",\n \"owner_id\",\n \"address_number\",\n \"details\",\n ]\n\nclass AddressAdmin(RootsMagicModelAdmin):\n pass\n\nclass ChildAdmin(RootsMagicModelAdmin):\n list_display = [\n \"record_id\",\n \"child\",\n \"family\",\n \"father_relationship\",\n \"mother_relationship\",\n \"child_order\",\n \"is_private\",\n \"father_proof\",\n \"mother_proof\",\n \"note\",\n ]\n raw_id_fields = [\n 'child',\n 'family',\n ]\n\nclass CitationAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"source_id\",\n \"owner_id\",\n \"quality\",\n \"is_private\",\n \"comments\",\n \"actual_text\",\n \"reference_number\",\n \"flags\",\n # \"fields\",\n ]\n\nclass ConfigurationAdmin(RootsMagicModelAdmin):\n pass\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"event_type\",\n \"owner\",\n \"owner_type\",\n \"owner_id\",\n \"family\",\n \"place\",\n \"site\",\n # \"date\",\n \"pretty_date\",\n \"sort_date\",\n \"is_primary\",\n \"is_private\",\n \"proof\",\n \"status\",\n \"edit_date\",\n \"sentence\",\n # \"details\",\n # \"note\",\n ]\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = \"Date\"\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"name\",\n \"abbreviation\",\n \"gedcom_tag\",\n \"use_value\",\n \"use_date\",\n \"use_place\",\n \"sentence\",\n \"flags\",\n ]\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"father\",\n \"mother\",\n \"child\",\n \"husband_order\",\n \"wife_order\",\n \"is_private\",\n \"proof\",\n \"spouse_label\",\n \"father_label\",\n \"mother_label\",\n # \"note\",\n ]\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"ext_system\",\n \"link_type\",\n \"rootsmagic\",\n \"ext_id\",\n \"modified\",\n \"ext_version\",\n \"ext_date\",\n \"status\",\n \"note\",\n ]\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = [\n \"link_id\",\n \"media\",\n \"owner\",\n \"owner_type\",\n \"owner_id\",\n \"is_primary\",\n \"include_1\",\n \"include_2\",\n \"include_3\",\n \"include_4\",\n \"sort_order\",\n \"rectangle_left\",\n \"rectangle_top\",\n \"rectangle_right\",\n \"rectangle_bottom\",\n \"note\",\n \"caption\",\n \"reference_number\",\n \"date\",\n \"sort_date\",\n # \"description\",\n ]\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"media_type\",\n \"media_path\",\n \"media_file\",\n \"url\",\n \"thumbnail\",\n \"caption\",\n \"reference_number\",\n # \"date\",\n \"pretty_date\",\n \"sort_date\",\n # \"description\",\n ]\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n 
pretty_date.short_description = \"Date\"\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner\",\n \"surname\",\n \"given\",\n \"prefix\",\n \"suffix\",\n \"nickname\",\n \"name_type\",\n \"date\",\n \"sort_date\",\n \"is_primary\",\n \"is_private\",\n \"proof\",\n \"edit_date\",\n \"sentence\",\n # \"note\",\n \"birth_year\",\n \"death_year\",\n ]\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n 'primary_name',\n \"sex_short\",\n \"edit_date\",\n \"parent\",\n \"spouse\",\n \"color\",\n \"relate_1\",\n \"relate_2\",\n \"flags\",\n \"is_living\",\n \"is_private\",\n \"proof\",\n \"unique_id\",\n \"bookmark\",\n # \"note\",\n ]\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"place_type\",\n \"name\",\n \"abbreviation\",\n \"normalized\",\n \"master_place\",\n # \"latitude\",\n # \"longitude\",\n \"pretty_latlong\",\n \"exact_latituate_longitude\",\n \"note\",\n ]\n raw_id_fields = [\n \"master_place\"\n ]\n readonly_fields = [\n \"pretty_latlong\"\n ]\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"role_name\",\n \"event_type\",\n \"role_type\",\n \"sentence\",\n ]\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"owner_id\",\n \"link_type\",\n \"name\",\n \"url\",\n \"note\",\n ]\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"event\",\n \"person\",\n \"witness_order\",\n \"role\",\n \"sentence\",\n \"note\",\n \"given\",\n \"surname\",\n \"prefix\",\n \"suffix\",\n ]\n\n\n\nadmin.site.register(AddressLink, AddressLinkAdmin)\nadmin.site.register(Address, AddressAdmin)\nadmin.site.register(Child, ChildAdmin)\nadmin.site.register(Citation, CitationAdmin)\nadmin.site.register(Configuration, ConfigurationAdmin)\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Exclusion, ExclusionAdmin)\nadmin.site.register(FactType, FactTypeAdmin)\nadmin.site.register(Family, FamilyAdmin)\nadmin.site.register(Group, GroupAdmin)\nadmin.site.register(Label, LabelAdmin)\nadmin.site.register(LinkAncestry, LinkAncestryAdmin)\nadmin.site.register(Link, LinkAdmin)\nadmin.site.register(MediaLink, MediaLinkAdmin)\nadmin.site.register(Multimedia, MultimediaAdmin)\nadmin.site.register(Name, NameAdmin)\nadmin.site.register(Person, PersonAdmin)\nadmin.site.register(Place, PlaceAdmin)\nadmin.site.register(ResearchItem, ResearchItemAdmin)\nadmin.site.register(Research, ResearchAdmin)\nadmin.site.register(Role, RoleAdmin)\nadmin.site.register(Source, SourceAdmin)\nadmin.site.register(SourceTemplate, SourceTemplateAdmin)\nadmin.site.register(Url, UrlAdmin)\nadmin.site.register(Witness, WitnessAdmin)\n",
"step-ids": [
31,
35,
38,
39,
48
]
}
|
[
31,
35,
38,
39,
48
] |
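
The record above captures a Django admin module for RootsMagic tables; its recurring trick is a computed changelist column, i.e. a method on the ModelAdmin named in list_display plus a short_description label. A minimal self-contained sketch of that pattern ("myapp", "Event", and its "date" field are hypothetical stand-ins, not taken from the record):

# Hedged sketch of the computed list_display column pattern seen above.
from django.contrib import admin
from myapp.models import Event  # hypothetical app and model


class EventAdmin(admin.ModelAdmin):
    list_display = ["id", "pretty_date"]

    def pretty_date(self, obj):
        # Render the stored date for the changelist instead of the raw value.
        return obj.date.strftime("%Y-%m-%d") if obj.date else ""
    pretty_date.short_description = "Date"


admin.site.register(Event, EventAdmin)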
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print("Welcome to my basic 'Calculator'")
print('Please choose your best option (+, -, *, /) ')
while True:
try:
A = int(input('Now Enter your first Value='))
break
except:
print('Oops!', sys.exc_info()[0], 'occurred.')
while True:
mathoparetor = input('Enter your Math oparetor=')
try:
if mathoparetor in ['+', '-', '*', '/']:
break
else:
raise Exception
except:
print('Opp, Enter Math again')
while True:
try:
B = int(input('Now Enter your second Value='))
break
except:
print('Oops!', sys.exc_info()[0], 'occurred.')
if mathoparetor == '+':
print('The addition number is', add(A, B))
elif mathoparetor == '-':
print('The subtraction number is', sub(A, B))
elif mathoparetor == '*':
print('The multiaplication number is', mull(A, B))
elif mathoparetor == '/':
print('The division number is', divi(A, B))
<|reserved_special_token_1|>
from mathmodule import *
import sys
print("Welcome to my basic 'Calculator'")
print('Please choose your best option (+, -, *, /) ')
while True:
try:
A = int(input('Now Enter your first Value='))
break
except:
print('Oops!', sys.exc_info()[0], 'occurred.')
while True:
mathoparetor = input('Enter your Math oparetor=')
try:
if mathoparetor in ['+', '-', '*', '/']:
break
else:
raise Exception
except:
print('Opp, Enter Math again')
while True:
try:
B = int(input('Now Enter your second Value='))
break
except:
print('Oops!', sys.exc_info()[0], 'occurred.')
if mathoparetor == '+':
print('The addition number is', add(A, B))
elif mathoparetor == '-':
print('The subtraction number is', sub(A, B))
elif mathoparetor == '*':
print('The multiaplication number is', mull(A, B))
elif mathoparetor == '/':
print('The division number is', divi(A, B))
<|reserved_special_token_1|>
from mathmodule import *
import sys
print("Welcome to my basic \'Calculator\'")
print("Please choose your best option (+, -, *, /) ")
# user input part
while True:
try:
A = int(input("Now Enter your first Value="))
break
except:
print("Oops!", sys.exc_info()[0], "occurred.")
while True:
mathoparetor = input("Enter your Math oparetor=")
try:
if mathoparetor in ['+','-','*','/']:
break
else:
raise Exception
except:
print("Opp, Enter Math again")
while True:
try:
B = int(input("Now Enter your second Value="))
break
except:
print("Oops!", sys.exc_info()[0], "occurred.")
# programing for perform
if mathoparetor == '+':
print('The addition number is', add(A,B))
elif mathoparetor == '-':
print('The subtraction number is', sub(A,B))
elif mathoparetor == '*':
print('The multiaplication number is', mull(A,B))
elif mathoparetor == '/':
print('The division number is', divi(A,B))
|
flexible
|
{
"blob_id": "1cca94040cdd8db9d98f587c62eff7c58eae7535",
"index": 6974,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"Welcome to my basic 'Calculator'\")\nprint('Please choose your best option (+, -, *, /) ')\nwhile True:\n try:\n A = int(input('Now Enter your first Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nwhile True:\n mathoparetor = input('Enter your Math oparetor=')\n try:\n if mathoparetor in ['+', '-', '*', '/']:\n break\n else:\n raise Exception\n except:\n print('Opp, Enter Math again')\nwhile True:\n try:\n B = int(input('Now Enter your second Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nif mathoparetor == '+':\n print('The addition number is', add(A, B))\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A, B))\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A, B))\nelif mathoparetor == '/':\n print('The division number is', divi(A, B))\n",
"step-3": "from mathmodule import *\nimport sys\nprint(\"Welcome to my basic 'Calculator'\")\nprint('Please choose your best option (+, -, *, /) ')\nwhile True:\n try:\n A = int(input('Now Enter your first Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nwhile True:\n mathoparetor = input('Enter your Math oparetor=')\n try:\n if mathoparetor in ['+', '-', '*', '/']:\n break\n else:\n raise Exception\n except:\n print('Opp, Enter Math again')\nwhile True:\n try:\n B = int(input('Now Enter your second Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nif mathoparetor == '+':\n print('The addition number is', add(A, B))\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A, B))\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A, B))\nelif mathoparetor == '/':\n print('The division number is', divi(A, B))\n",
"step-4": "from mathmodule import *\nimport sys\n\nprint(\"Welcome to my basic \\'Calculator\\'\")\n\nprint(\"Please choose your best option (+, -, *, /) \")\n\n# user input part \nwhile True:\n try:\n A = int(input(\"Now Enter your first Value=\"))\n break\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\nwhile True:\n mathoparetor = input(\"Enter your Math oparetor=\")\n try:\n if mathoparetor in ['+','-','*','/']:\n break\n else:\n raise Exception\n except:\n print(\"Opp, Enter Math again\")\n\nwhile True:\n try:\n B = int(input(\"Now Enter your second Value=\"))\n break\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\n\n\n\n# programing for perform\nif mathoparetor == '+':\n print('The addition number is', add(A,B))\n\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A,B))\n\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A,B))\n\nelif mathoparetor == '/':\n print('The division number is', divi(A,B))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
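
The calculator record above retries input with bare except: clauses, which also swallow KeyboardInterrupt. A hedged sketch (not part of the record) of the same retry loop catching only ValueError, the exception int() actually raises on non-numeric input:

# Assumed refinement: catch only ValueError instead of a bare except:.
def read_int(prompt):
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("Oops! That was not a whole number.")

# Usage: A = read_int("Now Enter your first Value=")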
# -*- coding: utf-8 -*-
from euler.baseeuler import BaseEuler
from os import path, getcwd
def get_name_score(l, name):
idx = l.index(name) + 1
val = sum([(ord(c) - 64) for c in name])
return idx * val
class Euler(BaseEuler):
def solve(self):
fp = path.join(getcwd(), 'euler/resources/names.txt')
with open(fp, 'r') as f:
names = sorted([name for name
in f.read().replace('"', '').split(',')])
return sum([get_name_score(names, name) for name in names])
@property
def answer(self):
return ('The total of all the name scores in the file is: %d'
% self.solve())
@property
def problem(self):
return '''
Project Euler Problem 22:
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file
containing over five-thousand first names, begin by sorting it into
alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a
name score.
For example, when the list is sorted into alphabetical order, COLIN, which
is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,
COLIN would obtain a score of 938 * 53 = 49714.
What is the total of all the name scores in the file?
'''
|
normal
|
{
"blob_id": "40d08bfa3286aa30b612ed83b5e9c7a29e9de809",
"index": 6540,
"step-1": "<mask token>\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n <mask token>\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-2": "<mask token>\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d' %\n self.solve())\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-3": "<mask token>\n\n\ndef get_name_score(l, name):\n idx = l.index(name) + 1\n val = sum([(ord(c) - 64) for c in name])\n return idx * val\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d' %\n self.solve())\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-4": "from euler.baseeuler import BaseEuler\nfrom os import path, getcwd\n\n\ndef get_name_score(l, name):\n idx = l.index(name) + 1\n val = sum([(ord(c) - 64) for c in name])\n return idx * val\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d' %\n self.solve())\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom euler.baseeuler import BaseEuler\nfrom os import path, getcwd\n\n\ndef get_name_score(l, name):\n idx = l.index(name) + 1\n val = sum([(ord(c) - 64) for c in name])\n return idx * val\n\n\nclass Euler(BaseEuler):\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name\n in f.read().replace('\"', '').split(',')])\n\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d'\n % self.solve())\n\n @property\n def problem(self):\n return '''\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n'''\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
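
The docstring's worked example can be checked directly with the same ord(c) - 64 letter scoring that get_name_score uses:

# Verifying the COLIN example quoted in the problem statement above:
# C + O + L + I + N = 3 + 15 + 12 + 9 + 14 = 53, and 938 * 53 = 49714.
name = "COLIN"
letter_value = sum(ord(c) - 64 for c in name)
assert letter_value == 53
assert 938 * letter_value == 49714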
<|reserved_special_token_0|>
class CartPoleModel(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CartPoleModel(nn.Module):
def __init__(self):
super(CartPoleModel, self).__init__()
self.fc1 = nn.Linear(4, 60)
self.fc2 = nn.Linear(60, 120)
self.fc3 = nn.Linear(120, 2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CartPoleModel(nn.Module):
def __init__(self):
super(CartPoleModel, self).__init__()
self.fc1 = nn.Linear(4, 60)
self.fc2 = nn.Linear(60, 120)
self.fc3 = nn.Linear(120, 2)
def forward(self, x):
x = F.tanh(self.fc1(x))
x = F.tanh(self.fc2(x))
x = self.fc3(x)
return x
<|reserved_special_token_1|>
import torch.nn as nn
import torch.nn.functional as F
class CartPoleModel(nn.Module):
def __init__(self):
super(CartPoleModel, self).__init__()
self.fc1 = nn.Linear(4, 60)
self.fc2 = nn.Linear(60, 120)
self.fc3 = nn.Linear(120, 2)
def forward(self, x):
x = F.tanh(self.fc1(x))
x = F.tanh(self.fc2(x))
x = self.fc3(x)
return x
<|reserved_special_token_1|>
# Neural network model(s) for the pygym 'CartPoleEnv'
#
# author: John Welsh
import torch.nn as nn
import torch.nn.functional as F
class CartPoleModel(nn.Module):
def __init__(self):
super(CartPoleModel, self).__init__()
self.fc1 = nn.Linear(4, 60)
self.fc2 = nn.Linear(60, 120)
self.fc3 = nn.Linear(120, 2)
def forward(self, x):
x = F.tanh(self.fc1(x))
x = F.tanh(self.fc2(x))
x = self.fc3(x)
return x
|
flexible
|
{
"blob_id": "bde3975f5b614a4b00ad392d9f0b4c1bd8c55dc0",
"index": 6855,
"step-1": "<mask token>\n\n\nclass CartPoleModel(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n\n def forward(self, x):\n x = F.tanh(self.fc1(x))\n x = F.tanh(self.fc2(x))\n x = self.fc3(x)\n return x\n",
"step-4": "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n\n def forward(self, x):\n x = F.tanh(self.fc1(x))\n x = F.tanh(self.fc2(x))\n x = self.fc3(x)\n return x\n",
"step-5": "# Neural network model(s) for the pygym 'CartPoleEnv'\n#\n# author: John Welsh\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n\n def forward(self, x):\n x = F.tanh(self.fc1(x))\n x = F.tanh(self.fc2(x))\n x = self.fc3(x)\n return x\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
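
CartPoleModel above is a 4-60-120-2 fully connected network with tanh activations (F.tanh still works, though newer PyTorch spells it torch.tanh). A quick shape check of one forward pass, assuming the class defined above is in scope:

# Hedged usage sketch for the CartPoleModel defined in the record above.
import torch

model = CartPoleModel()
obs = torch.randn(1, 4)  # one observation: cart pos/vel, pole angle/vel
q = model(obs)           # scores for the two discrete actions
assert q.shape == (1, 2)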
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
i = 0
for j in range(1, len(A), 2):
if A[j] % 2 == 1:
continue
else:
while i + 2 < len(A) and A[i] % 2 == 0:
i += 2
A[i], A[j] = A[j], A[i]
i += 2
return A
|
flexible
|
{
"blob_id": "429af603bf8f1c003799c3d94c0ce9a2c2f80dfc",
"index": 3835,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def sortArrayByParityII(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: List[int]\n \"\"\"\n i = 0\n for j in range(1, len(A), 2):\n if A[j] % 2 == 1:\n continue\n else:\n while i + 2 < len(A) and A[i] % 2 == 0:\n i += 2\n A[i], A[j] = A[j], A[i]\n i += 2\n return A\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
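
The solution above scans odd indices j and, on finding a misplaced even value, advances a second pointer i over even indices to locate an even value to swap in. A small usage check:

# Usage sketch for the in-place parity interleaving above.
s = Solution()
out = s.sortArrayByParityII([4, 2, 5, 7])
# One valid result is [4, 5, 2, 7]; the invariant is that the parity of
# each value matches the parity of its index.
assert all(out[i] % 2 == i % 2 for i in range(len(out)))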
<|reserved_special_token_0|>
@probe_time
def opencl_multi_convolve_image(*args):
return _opencl_multi_convolve_image(*args)
<|reserved_special_token_0|>
def multi_convolve_image(data, kernel, bias, dil_x, dil_y):
if device < 0:
return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)
else:
return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,
device, *GROUPS[0:2])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def subsample(x, pool_size):
dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)
]
return x[:dx:2, :dy:2]
<|reserved_special_token_0|>
@probe_time
def cpu_multi_convolve_image(*args):
return _multi_convolve_image(*args)
<|reserved_special_token_0|>
@probe_time
def opencl_multi_convolve_image(*args):
return _opencl_multi_convolve_image(*args)
@probe_time
def opencl_relu_max_pool_image(*args):
return _opencl_relu_max_pool_image(*args)
<|reserved_special_token_0|>
def multi_convolve_image(data, kernel, bias, dil_x, dil_y):
if device < 0:
return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)
else:
return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,
device, *GROUPS[0:2])
def relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):
if device < 0:
return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)
else:
return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,
dil_y, device, *GROUPS)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def subsample(x, pool_size):
dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)
]
return x[:dx:2, :dy:2]
<|reserved_special_token_0|>
@probe_time
def cpu_multi_convolve_image(*args):
return _multi_convolve_image(*args)
@probe_time
def cpu_relu_max_pool_image(*args):
return _relu_max_pool_image(*args)
@probe_time
def opencl_multi_convolve_image(*args):
return _opencl_multi_convolve_image(*args)
@probe_time
def opencl_relu_max_pool_image(*args):
return _opencl_relu_max_pool_image(*args)
<|reserved_special_token_0|>
def multi_convolve_image(data, kernel, bias, dil_x, dil_y):
if device < 0:
return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)
else:
return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,
device, *GROUPS[0:2])
def relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):
if device < 0:
return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)
else:
return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,
dil_y, device, *GROUPS)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def subsample(x, pool_size):
dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)
]
return x[:dx:2, :dy:2]
def probe_time(func):
def wrapper(*args, **kwargs):
t0 = time.time()
res = func(*args, **kwargs)
dt = time.time() - t0
print('Time (%s): %f' % (func.__name__, dt))
return res
return wrapper
@probe_time
def cpu_multi_convolve_image(*args):
return _multi_convolve_image(*args)
@probe_time
def cpu_relu_max_pool_image(*args):
return _relu_max_pool_image(*args)
@probe_time
def opencl_multi_convolve_image(*args):
return _opencl_multi_convolve_image(*args)
@probe_time
def opencl_relu_max_pool_image(*args):
return _opencl_relu_max_pool_image(*args)
<|reserved_special_token_0|>
if len(sys.argv) > 1:
fimg = sys.argv[1]
if len(sys.argv) > 2:
fmod = sys.argv[2]
if len(sys.argv) > 3:
device = int(sys.argv[3])
if device < 0:
device = None
<|reserved_special_token_0|>
def multi_convolve_image(data, kernel, bias, dil_x, dil_y):
if device < 0:
return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)
else:
return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,
device, *GROUPS[0:2])
def relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):
if device < 0:
return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)
else:
return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,
dil_y, device, *GROUPS)
print('CNN test')
<|reserved_special_token_0|>
for i in range(len(classif.conv_filters)):
kernel, bias = classif.get_weights(i)
flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]
flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.
pool_size, 1, 1), 2)
<|reserved_special_token_0|>
for i in range(len(classif.conv_filters), len(classif.layers)):
kernel, bias = classif.get_weights(i)
flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias
if i < len(classif.layers) - 1:
flow = np.maximum(flow, 0)
<|reserved_special_token_0|>
print('error = %f' % np.max(np.abs(gold - silver)))
<|reserved_special_token_1|>
import sys
import time
import numpy as np
import vii
import cnn
from cnn._utils import (FLOAT_DTYPE,
_multi_convolve_image,
_opencl_multi_convolve_image,
_relu_max_pool_image,
_opencl_relu_max_pool_image)
GROUPS = 25, 20, 1
def subsample(x, pool_size):
# Make sure it works with pool size > 2 !!!!
dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)]
return x[:dx:2, :dy:2]
def probe_time(func):
def wrapper(*args, **kwargs):
t0 = time.time()
res = func(*args, **kwargs)
dt = time.time() - t0
print('Time (%s): %f' % (func.__name__, dt))
return res
return wrapper
@probe_time
def cpu_multi_convolve_image(*args):
return _multi_convolve_image(*args)
@probe_time
def cpu_relu_max_pool_image(*args):
return _relu_max_pool_image(*args)
@probe_time
def opencl_multi_convolve_image(*args):
return _opencl_multi_convolve_image(*args)
@probe_time
def opencl_relu_max_pool_image(*args):
return _opencl_relu_max_pool_image(*args)
###########################################################################
fimg = 'pizza.png'
fmod = 'feb2.h5'
device = 0
brute_force = False
if len(sys.argv) > 1:
fimg = sys.argv[1]
if len(sys.argv) > 2:
fmod = sys.argv[2]
if len(sys.argv) > 3:
device = int(sys.argv[3])
if device < 0:
device = None
img = vii.load_image(fimg)
classif = cnn.load_image_classifier(fmod)
def multi_convolve_image(data, kernel, bias, dil_x, dil_y):
if device < 0:
return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)
else:
return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y, device, *(GROUPS[0:2]))
def relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):
if device < 0:
return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)
else:
return opencl_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y, device, *GROUPS)
###########################################################################
print('CNN test')
x = np.random.randint(img.dims[0] - classif.image_size[0] + 1)
y = np.random.randint(img.dims[1] - classif.image_size[1] + 1)
data = img.get_data().astype(FLOAT_DTYPE)[x:(x + classif.image_size[0]), y:(y + classif.image_size[1])] / 255
gold = classif.run(data)
flow = data
for i in range(len(classif.conv_filters)):
kernel, bias = classif.get_weights(i)
flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]
flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.pool_size, 1, 1), 2)
flow = flow.flatten()
for i in range(len(classif.conv_filters), len(classif.layers)):
kernel, bias = classif.get_weights(i)
flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias
if i < (len(classif.layers) - 1):
flow = np.maximum(flow, 0)
silver = cnn.softmax(flow)
print('error = %f' % np.max(np.abs(gold - silver)))
|
flexible
|
{
"blob_id": "8ec257d5dfe84e363e3c3aa5adee3470c20d1765",
"index": 5866,
"step-1": "<mask token>\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\n<mask token>\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n<mask token>\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\n<mask token>\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\ndef probe_time(func):\n\n def wrapper(*args, **kwargs):\n t0 = time.time()\n res = func(*args, **kwargs)\n dt = time.time() - t0\n print('Time (%s): %f' % (func.__name__, dt))\n return res\n return wrapper\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\nif len(sys.argv) > 1:\n fimg = sys.argv[1]\n if len(sys.argv) > 2:\n fmod = sys.argv[2]\n if len(sys.argv) > 3:\n device = int(sys.argv[3])\n if device < 0:\n device = None\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\nprint('CNN test')\n<mask token>\nfor i in range(len(classif.conv_filters)):\n kernel, bias = classif.get_weights(i)\n flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]\n flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.\n pool_size, 1, 1), 2)\n<mask token>\nfor i in range(len(classif.conv_filters), len(classif.layers)):\n kernel, bias = classif.get_weights(i)\n flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias\n if i < len(classif.layers) - 1:\n flow = np.maximum(flow, 0)\n<mask token>\nprint('error = %f' % np.max(np.abs(gold - silver)))\n",
"step-5": "import sys\nimport time\nimport numpy as np\n\nimport vii\n\nimport cnn\nfrom cnn._utils import (FLOAT_DTYPE,\n _multi_convolve_image,\n _opencl_multi_convolve_image,\n _relu_max_pool_image,\n _opencl_relu_max_pool_image)\n\n\nGROUPS = 25, 20, 1\n\ndef subsample(x, pool_size):\n # Make sure it works with pool size > 2 !!!!\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)]\n return x[:dx:2, :dy:2]\n\n\ndef probe_time(func):\n def wrapper(*args, **kwargs):\n t0 = time.time()\n res = func(*args, **kwargs)\n dt = time.time() - t0\n print('Time (%s): %f' % (func.__name__, dt))\n return res\n return wrapper\n\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n###########################################################################\n\nfimg = 'pizza.png'\nfmod = 'feb2.h5'\ndevice = 0\nbrute_force = False\nif len(sys.argv) > 1:\n fimg = sys.argv[1]\n if len(sys.argv) > 2:\n fmod = sys.argv[2]\n if len(sys.argv) > 3:\n device = int(sys.argv[3])\n if device < 0:\n device = None\nimg = vii.load_image(fimg)\nclassif = cnn.load_image_classifier(fmod)\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y, device, *(GROUPS[0:2]))\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y, device, *GROUPS)\n\n\n###########################################################################\n \nprint('CNN test')\n\nx = np.random.randint(img.dims[0] - classif.image_size[0] + 1)\ny = np.random.randint(img.dims[1] - classif.image_size[1] + 1)\n\ndata = img.get_data().astype(FLOAT_DTYPE)[x:(x + classif.image_size[0]), y:(y + classif.image_size[1])] / 255\ngold = classif.run(data)\n\nflow = data\nfor i in range(len(classif.conv_filters)):\n kernel, bias = classif.get_weights(i)\n flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]\n flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.pool_size, 1, 1), 2)\nflow = flow.flatten()\n\nfor i in range(len(classif.conv_filters), len(classif.layers)):\n kernel, bias = classif.get_weights(i)\n flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias\n if i < (len(classif.layers) - 1):\n flow = np.maximum(flow, 0)\n\nsilver = cnn.softmax(flow)\n\nprint('error = %f' % np.max(np.abs(gold - silver))) \n",
"step-ids": [
2,
6,
7,
9,
12
]
}
|
[
2,
6,
7,
9,
12
] |
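
The CNN test above runs each convolution, crops the one-pixel border, applies relu + max-pool, then halves the spatial resolution via subsample. A hedged toy check of what subsample does for pool_size 2, assuming NumPy and the helper from the record are in scope:

# Toy check of the subsample() helper defined in the record above.
import numpy as np

x = np.arange(36, dtype=np.float32).reshape(6, 6, 1)
y = subsample(x, 2)          # keeps every second row and column
assert y.shape == (3, 3, 1)  # spatial dims halved, channel axis kept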
class Solution:
def jump(self, nums: List[int]) -> int:
if len(nums) < 2: return 0
jump = 1
curr_max = max_reach = nums[0]
for i in range(1, len(nums)):
if max_reach >= len(nums) - 1:
return jump
curr_max = max(curr_max, i + nums[i])
if i == max_reach:
max_reach = curr_max
jump += 1
return jump
# TC: O(n)
# n is the len(nums), as we only scan the list once
# SC: O(1)
# we only init 3 variables, thus space is constant
|
normal
|
{
"blob_id": "7f2ffa653486d000c9eee0087fc1e6ca0c84003c",
"index": 5671,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def jump(self, nums: List[int]) ->int:\n if len(nums) < 2:\n return 0\n jump = 1\n curr_max = max_reach = nums[0]\n for i in range(1, len(nums)):\n if max_reach >= len(nums) - 1:\n return jump\n curr_max = max(curr_max, i + nums[i])\n if i == max_reach:\n max_reach = curr_max\n jump += 1\n return jump\n",
"step-4": "class Solution:\n def jump(self, nums: List[int]) -> int:\n \n if len(nums) < 2: return 0 \n \n jump = 1 \n curr_max = max_reach = nums[0] \n \n for i in range(1, len(nums)): \n if max_reach >= len(nums) - 1: \n return jump\n \n curr_max = max(curr_max, i + nums[i])\n \n if i == max_reach: \n max_reach = curr_max \n jump += 1 \n \n return jump\n \n # TC: O(n)\n # n is the len(nums), as we only scan the list once\n \n # SC: O(1)\n # we only init 3 variables, thus space is constant\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
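
The greedy jump counter above extends max_reach one "level" at a time and increments jump when the scan hits the current level's boundary. A usage check (the method annotates nums: List[int], so this assumes from typing import List is in scope, as on LeetCode):

# Hedged usage sketch for the greedy jump solution above.
s = Solution()
assert s.jump([2, 3, 1, 1, 4]) == 2  # 0 -> 1 -> 4
assert s.jump([1, 1, 1, 1]) == 3     # forced single steps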
import sys
from reportlab.graphics.barcode import code39
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import mm
from reportlab.pdfgen import canvas
from parseAccessionNumbers import parseFile
def main():
if len(sys.argv) <= 1:
print "No filepath argument passed."
return
c = canvas.Canvas("barcode_example.pdf", pagesize=letter)
accessionNumberList = parseFile(sys.argv[1])
# Page specs
totalHeight = 265 * mm
xColumnMargin = 70 * mm
yBarcodeMargin = 20 * mm
# Specs for lower right status info
xPageStatus = 165 * mm
yPageStatus = 17 * mm
yBarcodeStatus = 12 * mm
# Initial values
x = 1 * mm
y = totalHeight
x1 = 6.4 * mm
# Initialize barcode counts and page counts
currentBarcodeTotalCount = 0
currentPageCount = 0
currentPage = 1
totalPages = int(len(accessionNumberList) / 32)
if len(accessionNumberList) % 32 > 0:
totalPages += 1
for accessionNumber in accessionNumberList:
if currentBarcodeTotalCount % 32 == 0 and currentBarcodeTotalCount != 0:
c.drawString(xPageStatus, yPageStatus, "Page " + str(currentPage) + " of " + str(totalPages))
c.drawString(xPageStatus, yBarcodeStatus, str(currentPageCount) + " barcodes")
c.showPage()
# Reset values for a new page
x = 1 * mm
y = totalHeight
x1 = 6.4 * mm
currentPageCount = 0
# Increase to next page
currentPage += 1
currentBarcodeTotalCount += 1
currentPageCount += 1
barcode = code39.Extended39(accessionNumber)
# Draw the barcode on the canvas
barcode.drawOn(c, x, y)
x1 = x + 6.4 * mm
y -= 5 * mm
# Draw the actual string
c.drawString(x1, y, accessionNumber)
x = x
y -= yBarcodeMargin
if int(y) < 20:
x += xColumnMargin
y = totalHeight
c.drawString(xPageStatus, yPageStatus, "Page " + str(currentPage) + " of " + str(totalPages))
c.drawString(xPageStatus, yBarcodeStatus, str(currentPageCount) + " barcodes")
c.showPage()
c.save()
print "File successfully created"
main()
|
normal
|
{
"blob_id": "bc32518e5e37d4055f1bf5115953948a2bb24ba6",
"index": 3506,
"step-1": "import sys\nfrom reportlab.graphics.barcode import code39\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.lib.units import mm\nfrom reportlab.pdfgen import canvas\nfrom parseAccessionNumbers import parseFile\n\n\ndef main():\n if len(sys.argv) <= 1:\n print \"No filepath argument passed.\"\n return\n\n c = canvas.Canvas(\"barcode_example.pdf\", pagesize=letter)\n\n accessionNumberList = parseFile(sys.argv[1])\n\n # Page specs\n totalHeight = 265 * mm\n xColumnMargin = 70 * mm\n yBarcodeMargin = 20 * mm\n\n # Specs for lower right status info\n xPageStatus = 165 * mm\n yPageStatus = 17 * mm\n yBarcodeStatus = 12 * mm\n\n # Initial values\n x = 1 * mm\n y = totalHeight\n x1 = 6.4 * mm\n\n # Initialize barcode counts and page counts\n currentBarcodeTotalCount = 0\n currentPageCount = 0\n currentPage = 1\n totalPages = int(len(accessionNumberList) / 32)\n if len(accessionNumberList) % 32 > 0:\n totalPages += 1\n\n for accessionNumber in accessionNumberList:\n if currentBarcodeTotalCount % 32 == 0 and currentBarcodeTotalCount != 0:\n c.drawString(xPageStatus, yPageStatus, \"Page \" + str(currentPage) + \" of \" + str(totalPages))\n c.drawString(xPageStatus, yBarcodeStatus, str(currentPageCount) + \" barcodes\")\n c.showPage()\n\n # Reset values for a new page\n x = 1 * mm\n y = totalHeight\n x1 = 6.4 * mm\n currentPageCount = 0\n\n # Increase to next page\n currentPage += 1\n\n currentBarcodeTotalCount += 1\n currentPageCount += 1\n\n barcode = code39.Extended39(accessionNumber)\n\n # Draw the barcode on the canvas\n barcode.drawOn(c, x, y)\n x1 = x + 6.4 * mm\n y -= 5 * mm\n\n # Draw the actual string\n c.drawString(x1, y, accessionNumber)\n x = x\n y -= yBarcodeMargin\n\n if int(y) < 20:\n x += xColumnMargin\n y = totalHeight\n\n c.drawString(xPageStatus, yPageStatus, \"Page \" + str(currentPage) + \" of \" + str(totalPages))\n c.drawString(xPageStatus, yBarcodeStatus, str(currentPageCount) + \" barcodes\")\n c.showPage()\n c.save()\n print \"File successfully created\"\n\nmain()\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
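
The barcode script above is Python 2 (print statements) and steps each row down 5 mm for the label plus the 20 mm yBarcodeMargin before wrapping to a new 70 mm column. A hedged sketch of that layout arithmetic (reportlab's mm unit is 72 / 25.4 points):

# Layout arithmetic from the record above, shown standalone.
mm = 72 / 25.4                # reportlab.lib.units.mm, in points
row_pitch = 5 * mm + 20 * mm  # label offset + yBarcodeMargin
print(265 * mm / row_pitch)   # ~10.6 row pitches fit in the 265 mm column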
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.mkdir('作业')
<|reserved_special_token_0|>
for i in range(10):
f.write('hello world\n')
f.seek(0)
<|reserved_special_token_0|>
print(s)
<|reserved_special_token_0|>
for i in s:
f.write(i)
f.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.mkdir('作业')
f = open('D:/six3/s/作业/tet.txt', 'w+')
for i in range(10):
f.write('hello world\n')
f.seek(0)
s = f.read(100)
print(s)
f = open('D:/six3/s/作业/tet2.txt', 'w+')
for i in s:
f.write(i)
f.close()
<|reserved_special_token_1|>
import os
os.mkdir('作业')
f = open('D:/six3/s/作业/tet.txt', 'w+')
for i in range(10):
f.write('hello world\n')
f.seek(0)
s = f.read(100)
print(s)
f = open('D:/six3/s/作业/tet2.txt', 'w+')
for i in s:
f.write(i)
f.close()
<|reserved_special_token_1|>
import os
os.mkdir("作业")
f=open("D:/six3/s/作业/tet.txt",'w+')
for i in range(10):
f.write("hello world\n")
f.seek(0)
s=f.read(100)
print(s)
f=open("D:/six3/s/作业/tet2.txt",'w+')
for i in s:
f.write(i)
f.close()
|
flexible
|
{
"blob_id": "5f5e314d2d18deb12a8ae757a117ef8fbb2ddad5",
"index": 2391,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.mkdir('作业')\n<mask token>\nfor i in range(10):\n f.write('hello world\\n')\nf.seek(0)\n<mask token>\nprint(s)\n<mask token>\nfor i in s:\n f.write(i)\nf.close()\n",
"step-3": "<mask token>\nos.mkdir('作业')\nf = open('D:/six3/s/作业/tet.txt', 'w+')\nfor i in range(10):\n f.write('hello world\\n')\nf.seek(0)\ns = f.read(100)\nprint(s)\nf = open('D:/six3/s/作业/tet2.txt', 'w+')\nfor i in s:\n f.write(i)\nf.close()\n",
"step-4": "import os\nos.mkdir('作业')\nf = open('D:/six3/s/作业/tet.txt', 'w+')\nfor i in range(10):\n f.write('hello world\\n')\nf.seek(0)\ns = f.read(100)\nprint(s)\nf = open('D:/six3/s/作业/tet2.txt', 'w+')\nfor i in s:\n f.write(i)\nf.close()\n",
"step-5": "import os\nos.mkdir(\"作业\")\nf=open(\"D:/six3/s/作业/tet.txt\",'w+')\nfor i in range(10):\n f.write(\"hello world\\n\")\n\nf.seek(0)\ns=f.read(100)\nprint(s)\nf=open(\"D:/six3/s/作业/tet2.txt\",'w+')\nfor i in s:\n f.write(i)\nf.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
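
The file exercise above leaves both files open until the final close(). A hedged rewrite of the same write/rewind/read/copy sequence using with blocks, which close the files even on error (same hypothetical Windows paths as the record; read(100) still truncates the 120 written characters, as in the original):

# Context-manager version of the copy step above (behavior unchanged).
with open("D:/six3/s/作业/tet.txt", "w+") as f:
    f.writelines("hello world\n" for _ in range(10))
    f.seek(0)
    s = f.read(100)
with open("D:/six3/s/作业/tet2.txt", "w") as f2:
    f2.write(s)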
<|reserved_special_token_0|>
class IPBanMiddleware(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IPBanMiddleware(object):
<|reserved_special_token_0|>
def process_request(self, request):
ip = request.META['REMOTE_ADDR']
try:
ban = Ban.objects.get(ip=ip)
if ban.banned():
return render_to_response('ban/banned.html', {'reason': ban
.reason, 'unbandate': ban.unbandate()})
else:
ban.delete()
pass
except Ban.DoesNotExist:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IPBanMiddleware(object):
"""
Simple middleware for taking care of bans from specific IP's
Redirects the banned user to a ban-page with an explanation
"""
def process_request(self, request):
ip = request.META['REMOTE_ADDR']
try:
ban = Ban.objects.get(ip=ip)
if ban.banned():
return render_to_response('ban/banned.html', {'reason': ban
.reason, 'unbandate': ban.unbandate()})
else:
ban.delete()
pass
except Ban.DoesNotExist:
pass
<|reserved_special_token_1|>
from models import Ban
from django.shortcuts import render_to_response
class IPBanMiddleware(object):
"""
Simple middleware for taking care of bans from specific IP's
Redirects the banned user to a ban-page with an explanation
"""
def process_request(self, request):
ip = request.META['REMOTE_ADDR']
try:
ban = Ban.objects.get(ip=ip)
if ban.banned():
return render_to_response('ban/banned.html', {'reason': ban
.reason, 'unbandate': ban.unbandate()})
else:
ban.delete()
pass
except Ban.DoesNotExist:
pass
<|reserved_special_token_1|>
from models import Ban
from django.shortcuts import render_to_response
class IPBanMiddleware(object):
"""
Simple middleware for taking care of bans from specific IP's
Redirects the banned user to a ban-page with an explanation
"""
def process_request(self, request):
ip = request.META['REMOTE_ADDR'] # user's IP
# see if user is banned
try:
# if this doesnt throw an exception, user is banned
ban = Ban.objects.get(ip=ip)
if ban.banned():
# return the "ban page"
return render_to_response("ban/banned.html",
{"reason": ban.reason, "unbandate": ban.unbandate()})
else:
# User was previously banned, but the ban is over by now
ban.delete()
pass
except Ban.DoesNotExist: # not banned! goodie
pass
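# A sketch of enabling this pre-Django-1.10 style middleware (a plain object
# with process_request) via settings.py; the dotted path
# 'ban.middleware.IPBanMiddleware' is an assumption about the app layout:
#
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.common.CommonMiddleware',
#         'ban.middleware.IPBanMiddleware',
#     )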
|
flexible
|
{
"blob_id": "9289eb32db145187c5b4140e32acff520be8366e",
"index": 7620,
"step-1": "<mask token>\n\n\nclass IPBanMiddleware(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass IPBanMiddleware(object):\n <mask token>\n\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR']\n try:\n ban = Ban.objects.get(ip=ip)\n if ban.banned():\n return render_to_response('ban/banned.html', {'reason': ban\n .reason, 'unbandate': ban.unbandate()})\n else:\n ban.delete()\n pass\n except Ban.DoesNotExist:\n pass\n",
"step-3": "<mask token>\n\n\nclass IPBanMiddleware(object):\n \"\"\"\n Simple middleware for taking care of bans from specific IP's\n Redirects the banned user to a ban-page with an explanation\n \"\"\"\n\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR']\n try:\n ban = Ban.objects.get(ip=ip)\n if ban.banned():\n return render_to_response('ban/banned.html', {'reason': ban\n .reason, 'unbandate': ban.unbandate()})\n else:\n ban.delete()\n pass\n except Ban.DoesNotExist:\n pass\n",
"step-4": "from models import Ban\nfrom django.shortcuts import render_to_response\n\n\nclass IPBanMiddleware(object):\n \"\"\"\n Simple middleware for taking care of bans from specific IP's\n Redirects the banned user to a ban-page with an explanation\n \"\"\"\n\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR']\n try:\n ban = Ban.objects.get(ip=ip)\n if ban.banned():\n return render_to_response('ban/banned.html', {'reason': ban\n .reason, 'unbandate': ban.unbandate()})\n else:\n ban.delete()\n pass\n except Ban.DoesNotExist:\n pass\n",
"step-5": "from models import Ban\nfrom django.shortcuts import render_to_response\n\nclass IPBanMiddleware(object):\n \"\"\"\n Simple middleware for taking care of bans from specific IP's\n Redirects the banned user to a ban-page with an explanation\n \"\"\"\n def process_request(self, request):\n ip = request.META['REMOTE_ADDR'] # user's IP\n\n # see if user is banned\n try:\n # if this doesnt throw an exception, user is banned\n ban = Ban.objects.get(ip=ip)\n \n if ban.banned():\n # return the \"ban page\"\n return render_to_response(\"ban/banned.html\",\n {\"reason\": ban.reason, \"unbandate\": ban.unbandate()})\n else:\n # User was previously banned, but the ban is over by now\n ban.delete()\n pass\n\n except Ban.DoesNotExist: # not banned! goodie\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Naive Bayes Class
- Bernoulli Naive Bayes
- Multinomial Naive Bayes
- Gaussian Naive Bayes
    Author: Zhenhuan (Steven) Sun
"""
import numpy as np
class BernoulliNB:
def __init__(self, k=1.0, binarize=0.0):
# Laplace Smoothing Factor
self.K = k
# the degree of binarization
self.binarize = binarize
def fit(self, X, y):
# binarize X
# since we assume data is bernoulli distributed we need to make sure
# that data consist of binary values
X = self._binarize(X)
# separate training data by classes(different target)
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
# number of different class
self.n_classes = len(np.unique(y))
# count the number of examples and number of features in X
self.n_examples, self.n_features = X.shape
# count the number of examples that belong to class k (0 or 1 in spam classification)
prior_numerator = np.array([len(x) for x in X_separated_by_class])
# compute the prior probability (P(y))
self.prior_prob = prior_numerator / self.n_examples
# compute the log prior probability (log(P(y))) for prediction
self.log_prior_prob = np.log(self.prior_prob)
# compute the conditional probability
# with laplace smoothing we assume we have seen each feature at least self.K times
conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(np.array([len(x) + 2 * self.K for x in X_separated_by_class]), axis=1)
self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator
return self
def predict(self, X):
# binarize X
X = self._binarize(X)
# compute log posterior probability log(P(y|X))
posterior_prob_numerator = np.array([(x * np.log(self.conditional_prob) +
np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +
self.log_prior_prob for x in X])
posterior_prob_denominator = np.expand_dims(np.array([(x * np.log(self.conditional_prob) +
np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +
self.log_prior_prob for x in X]).sum(axis=1), axis=1)
posterior_prob = posterior_prob_numerator - posterior_prob_denominator
# alternative solution
# since posterior_prob_denominator is a constant thus we don't bother compute the denominator
# compute the numerator is sufficient enough to make prediction and also it makes algorithm runs faster
#return np.argmax(posterior_prob_numerator, axis=1)
return np.argmax(posterior_prob, axis=1)
def _binarize(self, X):
# convert the values in X to binary values (0 or 1)
return np.where(X > self.binarize, 1, 0)
class MultinomialNB:
def __init__(self, k=1.0):
# Laplace Smoothing Factor
self.K = k
def fit(self, X, y):
# separate the training data by class
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
# number of different class
self.n_classes = len(np.unique(y))
# count the number of examples that belong to different classes
prior_numerator = [len(x) for x in X_separated_by_class]
# count the total number of examples in the training set
prior_denominator = X.shape[0]
# compute prior probability
self.prior_prob = np.array(prior_numerator) / prior_denominator
# compute log prior probability for prediction
self.log_prior_prob = np.log(self.prior_prob)
# compute the conditional probability's numerator for different class (with laplace smoothing)
# assume we have seen each feature at least once to avoid divide by zero error
conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])
# compute the conditional probability's denominator for different class
conditional_prob_denominator = np.expand_dims(conditional_prob_numerator.sum(axis=1), axis=1)
# compute the conditional probability for each feature and for each different classes
self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator
return self
def predict(self, X):
        # compute the log conditional probability for each example and each class
        log_conditional_prob = np.array([(x * np.log(self.conditional_prob)).sum(axis=1) for x in X])
        # compute the posterior probability
        posterior_prob = log_conditional_prob + self.log_prior_prob
        # make prediction
        return np.argmax(posterior_prob, axis=1)
class GaussianNB:
def __init__(self, k=1.0):
# Laplace Smoothing Factor
self.K = k
def fit(self, X, y):
# separate the training set by classes
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
# count the number of different classes
self.n_classes = len(np.unique(y))
# compute prior probability
self.prior_prob = np.array([len(x) / X.shape[0] for x in X_separated_by_class])
# compute mean vector for each class
self.mean_vector = np.array([np.array(x).sum(axis=0) / len(x) for x in X_separated_by_class])
# compute covariance matrix for each class
covariance_diagonal_matrices = []
for c, x in enumerate(X_separated_by_class):
mean_square_difference = 0
for x_i in x:
# compute the covariance matrix for each examples (slow as hell -> abandoned)
# mean_difference = np.expand_dims((x_i - self.mean_vector[c]), axis=1)
# mean_square_difference += mean_difference.dot(mean_difference.T)
# compute the diagnal entries of covariance matrix for each examples (much faster than above method)
mean_difference = x_i - self.mean_vector[c]
mean_square_difference += mean_difference ** 2
# convert the list of diagonal entries back to covariance diagonal matrix
# here we assumed that the mean square difference between each feature and its mean is at least 1 to make sure that
# there is no zero variance in the covariance matrix and thus we won't encounter divide by zero error in the future
covariance_diagonal_matrix = ((mean_square_difference + self.K) / len(x)) * np.identity(X.shape[1])
covariance_diagonal_matrices.append(covariance_diagonal_matrix)
self.covariance_diagonal_matrices = np.asarray(covariance_diagonal_matrices)
return self
def log_gaussian_distribution(self, x, mean, variance):
log_multiplier = -np.log(np.sqrt((2 * np.pi) * variance))
log_exponent = -(x - mean)**2 / (2 * variance)
return sum(log_multiplier + log_exponent)
def predict(self, X):
variances = []
for matrix in self.covariance_diagonal_matrices:
variance = matrix.diagonal()
variances.append(variance)
variances = np.array(variances)
# list that stores all test data's posterior probability
posterior_prob_collection = []
for x in X:
conditional_prob = []
for mean, variance in zip(self.mean_vector, variances):
# compute conditional probability for each class
conditional_prob.append(self.log_gaussian_distribution(x, mean, variance))
# compute posterior probability
posterior_prob = np.array(conditional_prob) + np.log(self.prior_prob)
posterior_prob_collection.append(posterior_prob)
posterior_prob_collection = np.array(posterior_prob_collection)
return np.argmax(posterior_prob_collection, axis=1)
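if __name__ == '__main__':
    # A minimal usage sketch; the toy arrays below are illustrative only and
    # are not part of the original module.
    X_toy = np.array([[1, 0, 1], [1, 1, 0], [0, 0, 1], [0, 1, 1]])
    y_toy = np.array([0, 0, 1, 1])
    model = MultinomialNB(k=1.0).fit(X_toy, y_toy)
    # each row is assigned the class with the highest posterior probability
    print(model.predict(X_toy))  # expected: [0 0 1 1]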
|
normal
|
{
"blob_id": "5dfe86d654e4184bab4401f8b634326996e42e9c",
"index": 2646,
"step-1": "<mask token>\n\n\nclass MultinomialNB:\n <mask token>\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-2": "<mask token>\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-3": "<mask token>\n\n\nclass BernoulliNB:\n\n def __init__(self, k=1.0, binarize=0.0):\n self.K = k\n self.binarize = binarize\n\n def fit(self, X, y):\n X = self._binarize(X)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.n_examples, self.n_features = X.shape\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n self.prior_prob = prior_numerator / self.n_examples\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *\n self.K) for x in X_separated_by_class]), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n X = self._binarize(X)\n posterior_prob_numerator = np.array([((x * np.log(self.\n conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(\n self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in\n X]).sum(axis=1), axis=1)\n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n return np.where(X > self.binarize, 1, 0)\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + 
log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-4": "<mask token>\nimport numpy as np\n\n\nclass BernoulliNB:\n\n def __init__(self, k=1.0, binarize=0.0):\n self.K = k\n self.binarize = binarize\n\n def fit(self, X, y):\n X = self._binarize(X)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.n_examples, self.n_features = X.shape\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n self.prior_prob = prior_numerator / self.n_examples\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *\n self.K) for x in X_separated_by_class]), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n X = self._binarize(X)\n posterior_prob_numerator = np.array([((x * np.log(self.\n conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(\n self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in\n X]).sum(axis=1), axis=1)\n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n return np.where(X > self.binarize, 1, 0)\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return 
sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-5": "\"\"\"\n Naive Bayes Class\n - Bernoulli Naive Bayes\n - Multinomial Naive Bayes\n - Gaussian Naive Bayes\n Arthor: Zhenhuan(Steven) Sun\n\"\"\"\n\nimport numpy as np\n\nclass BernoulliNB:\n def __init__(self, k=1.0, binarize=0.0):\n # Laplace Smoothing Factor\n self.K = k\n\n # the degree of binarization\n self.binarize = binarize\n\n def fit(self, X, y):\n # binarize X\n # since we assume data is bernoulli distributed we need to make sure\n # that data consist of binary values\n X = self._binarize(X)\n\n # separate training data by classes(different target)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n\n # number of different class\n self.n_classes = len(np.unique(y))\n\n # count the number of examples and number of features in X\n self.n_examples, self.n_features = X.shape\n\n # count the number of examples that belong to class k (0 or 1 in spam classification)\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n\n # compute the prior probability (P(y))\n self.prior_prob = prior_numerator / self.n_examples\n\n # compute the log prior probability (log(P(y))) for prediction\n self.log_prior_prob = np.log(self.prior_prob)\n\n # compute the conditional probability\n # with laplace smoothing we assume we have seen each feature at least self.K times\n conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([len(x) + 2 * self.K for x in X_separated_by_class]), axis=1)\n self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator\n\n return self\n\n def predict(self, X):\n # binarize X\n X = self._binarize(X)\n\n # compute log posterior probability log(P(y|X))\n posterior_prob_numerator = np.array([(x * np.log(self.conditional_prob) + \n np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) + \n self.log_prior_prob for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([(x * np.log(self.conditional_prob) + \n np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +\n self.log_prior_prob for x in X]).sum(axis=1), axis=1)\n \n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n\n # alternative solution\n # since posterior_prob_denominator is a constant thus we don't bother compute the denominator\n # compute the numerator is sufficient enough to make prediction and also it makes algorithm runs faster\n #return np.argmax(posterior_prob_numerator, axis=1)\n\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n # convert the values in X to binary values (0 or 1)\n return np.where(X > self.binarize, 1, 0)\n\nclass MultinomialNB:\n def __init__(self, k=1.0):\n # Laplace Smoothing Factor\n self.K = k\n\n def fit(self, X, y):\n # separate the training data by class\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n \n # number of different class\n self.n_classes = len(np.unique(y))\n\n # count the number of examples that belong to different classes\n prior_numerator = [len(x) for x in X_separated_by_class]\n\n # count the total number of examples in the training set\n prior_denominator = X.shape[0]\n\n # compute prior probability\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n\n # compute log prior probability for prediction\n self.log_prior_prob = np.log(self.prior_prob)\n\n # compute the conditional probability's numerator for different class (with laplace smoothing)\n # assume we 
have seen each feature at least once to avoid divide by zero error\n conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])\n\n # compute the conditional probability's denominator for different class\n conditional_prob_denominator = np.expand_dims(conditional_prob_numerator.sum(axis=1), axis=1)\n\n # compute the conditional probability for each feature and for each different classes\n self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator\n\n return self\n\n def predict(self, X):\n # compute the log conditional probability for each examples and for each different classes\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)).sum(axis=1) for x in X])\n\n # compute the posterior probability\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n\n # make prediction\n return np.argmax(posterior_pronb, axis=1)\n\nclass GaussianNB:\n def __init__(self, k=1.0):\n # Laplace Smoothing Factor\n self.K = k\n\n def fit(self, X, y):\n # separate the training set by classes\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n\n # count the number of different classes\n self.n_classes = len(np.unique(y))\n\n # compute prior probability\n self.prior_prob = np.array([len(x) / X.shape[0] for x in X_separated_by_class])\n\n # compute mean vector for each class\n self.mean_vector = np.array([np.array(x).sum(axis=0) / len(x) for x in X_separated_by_class])\n\n # compute covariance matrix for each class\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n # compute the covariance matrix for each examples (slow as hell -> abandoned)\n # mean_difference = np.expand_dims((x_i - self.mean_vector[c]), axis=1)\n # mean_square_difference += mean_difference.dot(mean_difference.T) \n # compute the diagnal entries of covariance matrix for each examples (much faster than above method)\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n # convert the list of diagonal entries back to covariance diagonal matrix\n # here we assumed that the mean square difference between each feature and its mean is at least 1 to make sure that \n # there is no zero variance in the covariance matrix and thus we won't encounter divide by zero error in the future\n covariance_diagonal_matrix = ((mean_square_difference + self.K) / len(x)) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(covariance_diagonal_matrices)\n\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n\n log_multiplier = -np.log(np.sqrt((2 * np.pi) * variance))\n log_exponent = -(x - mean)**2 / (2 * variance)\n\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n \n # list that stores all test data's posterior probability\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n # compute conditional probability for each class\n conditional_prob.append(self.log_gaussian_distribution(x, mean, variance))\n # compute posterior probability\n posterior_prob = np.array(conditional_prob) + np.log(self.prior_prob)\n posterior_prob_collection.append(posterior_prob)\n 
posterior_prob_collection = np.array(posterior_prob_collection)\n \n return np.argmax(posterior_prob_collection, axis=1)",
"step-ids": [
8,
9,
14,
15,
16
]
}
|
[
8,
9,
14,
15,
16
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-30 14:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0007_auto_20170127_2254'),
]
operations = [
migrations.AlterField(
model_name='book',
name='subtitle',
field=models.CharField(blank=True, help_text='e.g. There and Back Again', max_length=200),
),
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=True),
),
]
|
normal
|
{
"blob_id": "65ea27851d9db0f0a06d42bd37eff633d22a1548",
"index": 9528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('books', '0007_auto_20170127_2254')]\n operations = [migrations.AlterField(model_name='book', name='subtitle',\n field=models.CharField(blank=True, help_text=\n 'e.g. There and Back Again', max_length=200)), migrations.\n AlterField(model_name='book', name='title', field=models.CharField(\n db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=\n True))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('books', '0007_auto_20170127_2254')]\n operations = [migrations.AlterField(model_name='book', name='subtitle',\n field=models.CharField(blank=True, help_text=\n 'e.g. There and Back Again', max_length=200)), migrations.\n AlterField(model_name='book', name='title', field=models.CharField(\n db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=\n True))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-01-30 14:50\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('books', '0007_auto_20170127_2254'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='book',\n name='subtitle',\n field=models.CharField(blank=True, help_text='e.g. There and Back Again', max_length=200),\n ),\n migrations.AlterField(\n model_name='book',\n name='title',\n field=models.CharField(db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while number < count:
print(number, '.', lines[number])
number = number + 1
fileopen.close()
<|reserved_special_token_1|>
fileopen = open(input(
'Please enter the name of the file that you wish to open.'), 'r')
lines = fileopen.readlines()
count = len(lines)
number = 0
while number < count:
print(number, '.', lines[number])
number = number + 1
fileopen.close()
<|reserved_special_token_1|>
#Opens the file that the user specifies
fileopen = open(input("Please enter the name of the file that you wish to open."), 'r')
#Reads the lines within the file and determines the length of the file
lines = fileopen.readlines()
count = len(lines)
#count is the number of lines in the file; number is the 0-based line index.
#While number is less than count, each line is printed with its index in front
#of it, so the whole file is shown with line numbers.
number = 0
while number < count:
print(number,".",lines[number])
number = number + 1
fileopen.close()
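#Design note: the same loop can be written more idiomatically as
#    for number, line in enumerate(lines):
#        print(number, ".", line)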
|
flexible
|
{
"blob_id": "258b28153124ce42578c9eede429354069d8a7d6",
"index": 2869,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile number < count:\n print(number, '.', lines[number])\n number = number + 1\nfileopen.close()\n",
"step-3": "fileopen = open(input(\n 'Please enter the name of the file that you wish to open.'), 'r')\nlines = fileopen.readlines()\ncount = len(lines)\nnumber = 0\nwhile number < count:\n print(number, '.', lines[number])\n number = number + 1\nfileopen.close()\n",
"step-4": "#Opens the file that the user specifies\nfileopen = open(input(\"Please enter the name of the file that you wish to open.\"), 'r')\n\n#Reads the lines within the file and determines the length of the file\nlines = fileopen.readlines()\ncount = len(lines)\n\n#Count is how long the file is, so number is the index values basically.\n#As long as the number variable is less than the amount of lines in the file (because one must be subtracted since the index starts at 0) the \n#number will be printed in front of the lines found in the file.\nnumber = 0\nwhile number < count:\n print(number,\".\",lines[number])\n number = number + 1\nfileopen.close()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# 1-[2-3-4-5]-1
# generate candidate routes as permutations, then prune them with backtracking
def DFS(idx, cost, cur_loc):
global min_cost
if min_cost < cost: return
if idx == N and arr[cur_loc][0]:
if min_cost > cost + arr[cur_loc][0]:
min_cost = cost + arr[cur_loc][0]
return
for i in range(1, N):
if way[i] or not arr[cur_loc][i] : continue
way[i] =1
DFS(idx+1, cost+arr[cur_loc][i], i)
way[i] = 0
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
way = [0] * N
min_cost = 100 * N
DFS(1, 0, 0)
print(min_cost)
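# Worked example with a classic 4-city cost matrix (0 means no direct road):
# input:
#   4
#   0 10 15 20
#   5 0 9 10
#   6 13 0 12
#   8 8 9 0
# output: 35 (minimum tour 0 -> 1 -> 3 -> 2 -> 0)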
|
normal
|
{
"blob_id": "4ff7e83c6e85a041578a8b3471cbbb7e0c2543e6",
"index": 2663,
"step-1": "<mask token>\n",
"step-2": "def DFS(idx, cost, cur_loc):\n global min_cost\n if min_cost < cost:\n return\n if idx == N and arr[cur_loc][0]:\n if min_cost > cost + arr[cur_loc][0]:\n min_cost = cost + arr[cur_loc][0]\n return\n for i in range(1, N):\n if way[i] or not arr[cur_loc][i]:\n continue\n way[i] = 1\n DFS(idx + 1, cost + arr[cur_loc][i], i)\n way[i] = 0\n\n\n<mask token>\n",
"step-3": "def DFS(idx, cost, cur_loc):\n global min_cost\n if min_cost < cost:\n return\n if idx == N and arr[cur_loc][0]:\n if min_cost > cost + arr[cur_loc][0]:\n min_cost = cost + arr[cur_loc][0]\n return\n for i in range(1, N):\n if way[i] or not arr[cur_loc][i]:\n continue\n way[i] = 1\n DFS(idx + 1, cost + arr[cur_loc][i], i)\n way[i] = 0\n\n\n<mask token>\nDFS(1, 0, 0)\nprint(min_cost)\n",
"step-4": "def DFS(idx, cost, cur_loc):\n global min_cost\n if min_cost < cost:\n return\n if idx == N and arr[cur_loc][0]:\n if min_cost > cost + arr[cur_loc][0]:\n min_cost = cost + arr[cur_loc][0]\n return\n for i in range(1, N):\n if way[i] or not arr[cur_loc][i]:\n continue\n way[i] = 1\n DFS(idx + 1, cost + arr[cur_loc][i], i)\n way[i] = 0\n\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\nway = [0] * N\nmin_cost = 100 * N\nDFS(1, 0, 0)\nprint(min_cost)\n",
"step-5": "# 1-[2-3-4-5]-1\n# 순열로 돌리고, 백트래킹으로 걷어내기\n\ndef DFS(idx, cost, cur_loc):\n global min_cost\n if min_cost < cost: return\n if idx == N and arr[cur_loc][0]:\n if min_cost > cost + arr[cur_loc][0]:\n min_cost = cost + arr[cur_loc][0]\n return\n for i in range(1, N):\n if way[i] or not arr[cur_loc][i] : continue\n way[i] =1\n DFS(idx+1, cost+arr[cur_loc][i], i)\n way[i] = 0\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\nway = [0] * N\nmin_cost = 100 * N\nDFS(1, 0, 0)\nprint(min_cost)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# new libraries
import ConfigParser
import logging
from time import time
from os import path
# imports from nike.py below
import smass
import helperFunctions
import clusterSMass_orig
import numpy as np
from joblib import Parallel, delayed
def getConfig(section, item, boolean=False,
userConfigFile="BMA_StellarMass_Config.ini"):
configFile = ConfigParser.ConfigParser()
configFile.read(userConfigFile)
# if config item not found, raise log warning
if (not configFile.has_option(section, item)):
		msg = '{item} from [{section}] NOT found in config file: {userConfigFile}!'.format(
			item=item, section=section,
			userConfigFile=userConfigFile)
if (section != 'Log'):
logging.warning(msg)
else:
print msg
return ""
# else save item value (debug)
msg = '{item}: {value}'.format(
item=item, value=configFile.get(section, item))
if (section != 'Log'):
logging.debug(msg)
else:
print msg
if (not boolean):
return configFile.get(section, item)
else:
return configFile.getboolean(section, item)
def isOperationSet(operation,section="Operations"):
return getConfig(boolean=True, section=section,
item=operation)
def createLog():
logLevel = getConfig("Log","level")
logFileName = getConfig("Log","logFile")
myFormat = '[%(asctime)s] [%(levelname)s]\t%(module)s - %(message)s'
if logLevel == 'DEBUG':
logging.basicConfig(
filename=logFileName,
level=logging.DEBUG,
format=myFormat)
else:
logging.basicConfig(
filename=logFileName,
level=logging.INFO,
format=myFormat)
def extractTarGz(tarFileName, path):
import tarfile
tar = tarfile.open(tarFileName, "r:gz")
	tar.extractall(path=path)
tar.close()
def getInputPath():
inputPath = getConfig("Paths","inputPath")
# if a inputPath is not set, go after the .tar.gz file
if (not inputPath):
# if tarFile doesn't exist, abort
tarName = getConfig("Files","tarFile")
if (not tarName or not path.isfile(tarName) or
not tarName.endswith("tar.gz")):
return ""
# defining inputPath to uncompress file
inputPath = "./simha_miles_Nov2016/"
extractTarGz(tarFileName=tarName, path=inputPath)
return inputPath
def getStellarMassOutPrefix():
stellarMassOutPrefix = getConfig("Files","stellarMassOutPrefix")
if not stellarMassOutPrefix:
logging.critical("Can't continue without stellarMassOutPrefix defined! Exiting.")
exit()
return stellarMassOutPrefix
def combineFits():
from combineCat import combineBMAStellarMassOutput
stellarMassOutPrefix = getStellarMassOutPrefix()
combineBMAStellarMassOutput(stellarMassOutPrefix)
def computeStellarMass(batch, memPerJob):
# For running the stellar masses (takes the longest)
batchIndex = batch + memPerJob
job = int(batchIndex / memPerJob)
logging.debug('Starting computeStellarMass() with batch = {b}; job = {j}.'.format(
b = batch, j = job))
stellarMassOutFile = getConfig("Files","stellarMassOutPrefix") + "{:0>5d}.fits".format(job)
inPath = getInputPath()
membersInFile = getConfig("Files","membersInputFile")
if (not inPath or not membersInFile):
logging.critical("Can't continue without either inputPath or membersInputFile defined! Exiting.")
exit()
inputDataDict = helperFunctions.read_afterburner(membersInFile, batch, batchIndex)
smass.calc(inputDataDict, outfile=stellarMassOutFile, indir=inPath, lib="miles")
logging.debug('Returning from computeStellarMass() with batch = {b}; job = {j}.'.format(
b = batch, j = job))
def computeClusterStellarMass():
stellarMassFile = getConfig("Files","stellarMassOutPrefix") + 'full.fits'
clusterOutFile = getConfig("Files","clusterStellarMassOutFile")
logging.info('Computing cluster stellar mass.')
clusterSMass_orig.haloStellarMass(filename = stellarMassFile, outfile = clusterOutFile)
def parallelComputeStellarMass(batchStart=0,
batchMax=25936, nJobs=100, nCores=20):
# nJobs is normally = 100
batchesList = np.linspace(batchStart, batchMax, nJobs, endpoint=False, dtype=int)
logging.info('Calling parallelism inside parallelComputeStellarMass().')
Parallel(n_jobs=nCores)(delayed(computeStellarMass)
(batch, (batchMax - batchStart) / nJobs)
for batch in batchesList)
# generate concatenated fits file
logging.info('Combining fits.')
combineFits()
def main():
# start logging
createLog()
logging.info('Starting BMA Stellar Masses program.')
# get initial time
total_t0 = time()
# check and parallel compute stellar mass,
# if it is the case
if (isOperationSet(operation="stellarMass")):
logging.info('Starting parallel stellar masses operation.')
section = "Parallel"
stellarMass_t0 = time()
# get parallel information
batchStart = int(getConfig(section, "batchStart"))
batchMax = int(getConfig(section, "batchMax"))
nJobs = int(getConfig(section, "nJobs"))
nCores = int(getConfig(section, "nCores"))
# call function to parallel compute
parallelComputeStellarMass(batchStart=batchStart,
batchMax=batchMax, nJobs=nJobs, nCores=nCores)
# save time to compute stellar mass
stellarMassTime = time() - stellarMass_t0
stellarMassMsg = "Stellar Mass (parallel) time: {}s".format(stellarMassTime)
logging.info(stellarMassMsg)
# check and compute cluster stellar mass,
# if it is the case
if (isOperationSet(operation="clusterStellarMass")):
logging.info('Starting cluster stellar mass operation.')
clusterStellarMassTime_t0 = time()
computeClusterStellarMass()
# save time to compute cluster stellar mass
clusterStellarMassTime = time() - clusterStellarMassTime_t0
clusterStellarMassMsg = "Cluster Stellar Mass time: {}s".format(clusterStellarMassTime)
logging.info(clusterStellarMassMsg)
# save total computing time
totalTime = time() - total_t0
totalTimeMsg = "Total time: {}s".format(totalTime)
logging.info(totalTimeMsg)
logging.info('All done.')
if __name__ == "__main__":
main()
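# Illustrative BMA_StellarMass_Config.ini; the section/item names mirror the
# getConfig() calls above, but every value shown here is an assumption:
#
#   [Log]
#   level = INFO
#   logFile = bma_stellar_mass.log
#
#   [Operations]
#   stellarMass = True
#   clusterStellarMass = True
#
#   [Parallel]
#   batchStart = 0
#   batchMax = 25936
#   nJobs = 100
#   nCores = 20
#
#   [Paths]
#   inputPath = ./simha_miles_Nov2016/
#
#   [Files]
#   tarFile = simha_miles_Nov2016.tar.gz
#   stellarMassOutPrefix = stellar_mass_out_
#   membersInputFile = members_afterburner.fits
#   clusterStellarMassOutFile = cluster_stellar_mass.fits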
|
normal
|
{
"blob_id": "ae71cbd17ec04125354d5aac1cf800f2dffa3e04",
"index": 3314,
"step-1": "# new libraries\nimport ConfigParser\nimport logging\nfrom time import time\nfrom os import path\n# imports from nike.py below\nimport smass\nimport helperFunctions\nimport clusterSMass_orig\nimport numpy as np\nfrom joblib import Parallel, delayed\n\n\ndef getConfig(section, item, boolean=False,\n\t\tuserConfigFile=\"BMA_StellarMass_Config.ini\"):\n\n\tconfigFile = ConfigParser.ConfigParser()\n\tconfigFile.read(userConfigFile)\n\n\t# if config item not found, raise log warning\n\tif (not configFile.has_option(section, item)):\n\t\tmsg = '{item} from [{setion}] NOT found in config file: {userConfigFile}!'.format(\n\t\t\titem=item, section=section,\n\t\t\tuserConfigFile=userConfigFile)\n\t\tif (section != 'Log'):\n\t\t\tlogging.warning(msg)\n\t\telse:\n\t\t\tprint msg\n\t\treturn \"\"\n\n\t# else save item value (debug)\n\tmsg = '{item}: {value}'.format(\n\t\titem=item, value=configFile.get(section, item))\n\tif (section != 'Log'):\n\t\tlogging.debug(msg)\n\telse:\n\t\tprint msg\n\n\tif (not boolean):\n\t\treturn configFile.get(section, item)\n\n\telse:\n\t\treturn configFile.getboolean(section, item)\n\n\ndef isOperationSet(operation,section=\"Operations\"):\n\treturn getConfig(boolean=True, section=section,\n\t\titem=operation)\n\n\ndef createLog():\n\tlogLevel = getConfig(\"Log\",\"level\")\n\tlogFileName = getConfig(\"Log\",\"logFile\")\n\tmyFormat = '[%(asctime)s] [%(levelname)s]\\t%(module)s - %(message)s'\n\tif logLevel == 'DEBUG':\n\t\tlogging.basicConfig(\n\t\t\tfilename=logFileName,\n\t\t\tlevel=logging.DEBUG,\n\t\t\tformat=myFormat)\n\telse:\n\t\tlogging.basicConfig(\n\t\t\tfilename=logFileName,\n\t\t\tlevel=logging.INFO,\n\t\t\tformat=myFormat)\n\n\ndef extractTarGz(tarFileName, path):\n\timport tarfile\n\ttar = tarfile.open(tarFileName, \"r:gz\")\n\ttar.extractall(path=inputPath)\n\ttar.close()\n\n\ndef getInputPath():\n\tinputPath = getConfig(\"Paths\",\"inputPath\")\n\t# if a inputPath is not set, go after the .tar.gz file\n\tif (not inputPath):\n\n\t\t# if tarFile doesn't exist, abort\n\t\ttarName = getConfig(\"Files\",\"tarFile\")\n\n\t\tif (not tarName or not path.isfile(tarName) or\n\t\t\t\tnot tarName.endswith(\"tar.gz\")):\n\n\t\t\treturn \"\"\n\n\t\t# defining inputPath to uncompress file\n\t\tinputPath = \"./simha_miles_Nov2016/\"\n\t\textractTarGz(tarFileName=tarName, path=inputPath)\n\n\treturn inputPath\n\n\ndef getStellarMassOutPrefix():\n\tstellarMassOutPrefix = getConfig(\"Files\",\"stellarMassOutPrefix\")\n\n\tif not stellarMassOutPrefix:\n\t\tlogging.critical(\"Can't continue without stellarMassOutPrefix defined! Exiting.\")\n\t\texit()\n\n\treturn stellarMassOutPrefix\n\n\ndef combineFits():\n\tfrom combineCat import combineBMAStellarMassOutput\n\tstellarMassOutPrefix = getStellarMassOutPrefix()\n\tcombineBMAStellarMassOutput(stellarMassOutPrefix)\n\n\ndef computeStellarMass(batch, memPerJob):\n\t# For running the stellar masses (takes the longest)\n\tbatchIndex = batch + memPerJob\n\tjob = int(batchIndex / memPerJob)\n\n\tlogging.debug('Starting computeStellarMass() with batch = {b}; job = {j}.'.format(\n\t\tb = batch, j = job))\n\n\tstellarMassOutFile = getConfig(\"Files\",\"stellarMassOutPrefix\") + \"{:0>5d}.fits\".format(job)\n\n\tinPath = getInputPath()\n\tmembersInFile = getConfig(\"Files\",\"membersInputFile\")\n\n\tif (not inPath or not membersInFile):\n\t\tlogging.critical(\"Can't continue without either inputPath or membersInputFile defined! 
Exiting.\")\n\t\texit()\n\n\tinputDataDict = helperFunctions.read_afterburner(membersInFile, batch, batchIndex)\n\n\tsmass.calc(inputDataDict, outfile=stellarMassOutFile, indir=inPath, lib=\"miles\")\n\n\tlogging.debug('Returning from computeStellarMass() with batch = {b}; job = {j}.'.format(\n\t\tb = batch, j = job))\n\n\ndef computeClusterStellarMass():\n\tstellarMassFile = getConfig(\"Files\",\"stellarMassOutPrefix\") + 'full.fits'\t\n\tclusterOutFile = getConfig(\"Files\",\"clusterStellarMassOutFile\")\n\n\tlogging.info('Computing cluster stellar mass.')\n\tclusterSMass_orig.haloStellarMass(filename = stellarMassFile, outfile = clusterOutFile)\n\n\ndef parallelComputeStellarMass(batchStart=0,\n\t\tbatchMax=25936, nJobs=100, nCores=20):\n\t\t# nJobs is normally = 100\n\tbatchesList = np.linspace(batchStart, batchMax, nJobs, endpoint=False, dtype=int)\n\n\tlogging.info('Calling parallelism inside parallelComputeStellarMass().')\n\tParallel(n_jobs=nCores)(delayed(computeStellarMass)\n\t\t(batch, (batchMax - batchStart) / nJobs) \n\t\tfor batch in batchesList)\n\n\t# generate concatenated fits file\n\tlogging.info('Combining fits.')\n\tcombineFits()\n\n\ndef main():\n\t# start logging\n\tcreateLog()\n\n\tlogging.info('Starting BMA Stellar Masses program.')\n\n\t# get initial time\n\ttotal_t0 = time()\n\n\t# check and parallel compute stellar mass,\n\t#\tif it is the case\n\tif (isOperationSet(operation=\"stellarMass\")):\n\t\tlogging.info('Starting parallel stellar masses operation.')\n\t\tsection = \"Parallel\"\n\n\t\tstellarMass_t0 = time()\n\t\t# get parallel information\n\t\tbatchStart = int(getConfig(section, \"batchStart\"))\n\t\tbatchMax = int(getConfig(section, \"batchMax\"))\n\t\tnJobs \t = int(getConfig(section, \"nJobs\"))\n\t\tnCores \t = int(getConfig(section, \"nCores\"))\n\n\t\t# call function to parallel compute\n\t\tparallelComputeStellarMass(batchStart=batchStart,\n\t\t\tbatchMax=batchMax, nJobs=nJobs, nCores=nCores)\n\n\t\t# save time to compute stellar mass\n\t\tstellarMassTime = time() - stellarMass_t0\n\t\tstellarMassMsg = \"Stellar Mass (parallel) time: {}s\".format(stellarMassTime)\n\t\tlogging.info(stellarMassMsg)\n\n\t# check and compute cluster stellar mass,\n\t#\tif it is the case\n\tif (isOperationSet(operation=\"clusterStellarMass\")):\n\t\tlogging.info('Starting cluster stellar mass operation.')\n\t\tclusterStellarMassTime_t0 = time()\n\t\tcomputeClusterStellarMass()\n\n\t\t# save time to compute cluster stellar mass\n\t\tclusterStellarMassTime = time() - clusterStellarMassTime_t0\n\t\tclusterStellarMassMsg = \"Cluster Stellar Mass time: {}s\".format(clusterStellarMassTime)\n\t\tlogging.info(clusterStellarMassMsg)\n\n\t# save total computing time\n\ttotalTime = time() - total_t0\n\ttotalTimeMsg = \"Total time: {}s\".format(totalTime)\n\tlogging.info(totalTimeMsg)\n\tlogging.info('All done.')\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |