code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---|
a = (5, 1, 3, 5, 3, 1, 0, 9, 5, 3, 8, 6, 5, 7)
b = []
for index, elements in enumerate(a):
    if elements == 5:
        b.append(index)
print(b)
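# Prints [0, 3, 8, 12]: the indices at which the value 5 occurs in the tuple a.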
|
normal
|
{
"blob_id": "d7876a078af8572e44b4eb16f3ec0898db73724d",
"index": 2118,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor index, elements in enumerate(a):\n if elements == 5:\n b.append(index)\nprint(b)\n",
"step-3": "a = 5, 1, 3, 5, 3, 1, 0, 9, 5, 3, 8, 6, 5, 7\nb = []\nfor index, elements in enumerate(a):\n if elements == 5:\n b.append(index)\nprint(b)\n",
"step-4": "a = (5, 1, 3, 5, 3, 1, 0, 9, 5, 3, 8, 6, 5, 7)\r\nb = []\r\nfor index, elements in enumerate (a):\r\n if elements == 5:\r\n b.append(index)\r\nprint(b)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def _split_journal_attrs(attrs):
if attrs:
return [t.text.replace(':', '').strip().split('\n') for t in [k for
k in attrs if isinstance(k, bs4.element.Tag)]]
return []
def _get_title_history(history_attrs):
all_td = []
if history_attrs:
for h in history_attrs:
all_td.extend(h.find_all('td'))
if len(all_td) > 0:
return '#'.join([''.join([a.strip() for a in k.text.split('\n')]) for
k in all_td if isinstance(k, bs4.element.Tag)])
return ''
def _get_pair_key_values(splitted_attrs, prefix: str):
tmp_dict = {}
for j in splitted_attrs:
tmp_dict[prefix + j[0].replace('\t', ' ')] = '#'.join([k.strip().
replace('\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(
) != ''])
return tmp_dict
def html2dict(path_zip_file: str):
"""
Open, reads and converts a zipped html into a dict.
:param path_zip_file: path of the zip file
:return: a dict where each key is the profile id and the value is its key-value pairs (attrs)
"""
profile_id = path_zip_file.split('/')[-1].split('.')[0]
inner_html_path = 'data/ulrich/html/' + profile_id + '.html'
html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()
parsed_data = [profile_id]
soupped_html = BeautifulSoup(html_content, 'html.parser')
title_details = soupped_html.find('div', {'id': 'resultPane'})
basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',
title_details, profile_id)
title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',
title_details, profile_id)
bd_splitted = _split_journal_attrs(basic_description_attrs)
dict_bd = _get_pair_key_values(bd_splitted, 'bd_')
title_history = _get_title_history(title_history_attrs)
for k in sorted(DEFAULT_ATTRS):
parsed_data.append(dict_bd.get(k, ''))
parsed_data.append(title_history)
return parsed_data
def save_tsv_file(parsed_data):
"""
Save a parsed journal to a tsv file
:param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes
"""
result_file.write('\t'.join(parsed_data) + '\n')
def save_into_html_file(path_html_file: str, response):
"""
Receives a response (in text format).
Saves the document into a html file.
"""
html_file = open(path_html_file, 'w')
html_file.writelines(response)
html_file.close()
with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:
zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)
zf.close()
os.remove(path_html_file)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _find_all_tr_pairs(key: str, title_details, profile_id):
try:
return title_details.find('div', {'id': key}).find('table', {
'class': 'resultsTable'}).find_all('tr')
except AttributeError:
        logging.warning('ID %s (KEY) %s does not have resultsTable' % (
profile_id, key))
def _split_journal_attrs(attrs):
if attrs:
return [t.text.replace(':', '').strip().split('\n') for t in [k for
k in attrs if isinstance(k, bs4.element.Tag)]]
return []
def _get_title_history(history_attrs):
all_td = []
if history_attrs:
for h in history_attrs:
all_td.extend(h.find_all('td'))
if len(all_td) > 0:
return '#'.join([''.join([a.strip() for a in k.text.split('\n')]) for
k in all_td if isinstance(k, bs4.element.Tag)])
return ''
def _get_pair_key_values(splitted_attrs, prefix: str):
tmp_dict = {}
for j in splitted_attrs:
tmp_dict[prefix + j[0].replace('\t', ' ')] = '#'.join([k.strip().
replace('\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(
) != ''])
return tmp_dict
def html2dict(path_zip_file: str):
"""
Open, reads and converts a zipped html into a dict.
:param path_zip_file: path of the zip file
:return: a dict where each key is the profile id and the value is its key-value pairs (attrs)
"""
profile_id = path_zip_file.split('/')[-1].split('.')[0]
inner_html_path = 'data/ulrich/html/' + profile_id + '.html'
html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()
parsed_data = [profile_id]
soupped_html = BeautifulSoup(html_content, 'html.parser')
title_details = soupped_html.find('div', {'id': 'resultPane'})
basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',
title_details, profile_id)
title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',
title_details, profile_id)
bd_splitted = _split_journal_attrs(basic_description_attrs)
dict_bd = _get_pair_key_values(bd_splitted, 'bd_')
title_history = _get_title_history(title_history_attrs)
for k in sorted(DEFAULT_ATTRS):
parsed_data.append(dict_bd.get(k, ''))
parsed_data.append(title_history)
return parsed_data
def save_tsv_file(parsed_data):
"""
Save a parsed journal to a tsv file
:param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes
"""
result_file.write('\t'.join(parsed_data) + '\n')
def save_into_html_file(path_html_file: str, response):
"""
Receives a response (in text format).
Saves the document into a html file.
"""
html_file = open(path_html_file, 'w')
html_file.writelines(response)
html_file.close()
with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:
zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)
zf.close()
os.remove(path_html_file)
async def fetch(url, session):
"""
Fetches the url.
Calls the method save_into_html_file with the response as a parameter (in text format).
"""
try:
async with session.get(url) as response:
profile_id = url.split('/')[-1]
print('COLLECTING %s' % profile_id)
for attempt in range(DEFAULT_MAX_ATTEMPTS):
try:
if response.status == 200:
response = await response.text(errors='ignore')
save_into_html_file(DEFAULT_DIR_HTML + profile_id +
'.html', response)
logging.info('COLLECTED: %s' % profile_id)
break
                    elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS - 1:  # last attempt: range() never reaches DEFAULT_MAX_ATTEMPTS
logging.info('RESPONSE_ERROR_500: %s' % profile_id)
elif response.status == 404:
logging.info('RESPONSE_ERROR_404: %s' % profile_id)
except ServerDisconnectedError:
logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('TIMEOUT_ERROR: %s' % profile_id)
except ContentTypeError:
logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('GENERALIZED_TIMEOUT_ERROR')
except ClientConnectionError:
logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')
except ServerDisconnectedError:
logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')
except ContentTypeError:
logging.info('GENERALIZED_CONTENT_TYPE_ERROR')
async def bound_fetch(sem, url, session):
"""
Limits the collecting task to a semaphore.
"""
async with sem:
await fetch(url, session)
async def run():
"""
Creates tasks to get the html file with respect to a list composed by htmls.
"""
sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)
tasks = []
async with ClientSession() as session:
for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:
task = asyncio.ensure_future(bound_fetch(sem, u, session))
tasks.append(task)
responses = asyncio.gather(*tasks)
await responses
if __name__ == '__main__':
logging.basicConfig(filename='ulrich.log', level=logging.INFO, format=
'%(asctime)s - %(levelname)s - %(message)s')
MODE = sys.argv[1]
DIR_HTML = sys.argv[2]
if MODE == 'collect':
DEFAULT_DIR_HTML = DIR_HTML
os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)
if len(sys.argv) == 4:
start_id = int(sys.argv[3])
DEFAULT_RANGE_IDS = itertools.chain(range(start_id,
DEFAULT_END_ID), DEFAULT_RANGE_2)
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run())
loop.run_until_complete(future)
elif MODE == 'parse':
DEFAULT_DIR_HTML = DIR_HTML
START = int(sys.argv[3])
END = int(sys.argv[4])
if END > len(os.listdir(DEFAULT_DIR_HTML)):
END = len(os.listdir(DEFAULT_DIR_HTML))
htmls = sorted([(DEFAULT_DIR_HTML + h) for h in os.listdir(DIR_HTML)])[
START:END]
result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')
result_file.write('\t'.join(['Profile Identifier'] + sorted(
DEFAULT_ATTRS) + ['title_history']) + '\n')
for i, h in enumerate(sorted(htmls)):
print('\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')
parsed = html2dict(h)
save_tsv_file(parsed)
result_file.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROOT_URL = 'https://ulrichsweb.serialssolutions.com/titleDetails/{}'
DEFAULT_START_ID = 12515
DEFAULT_END_ID = 835018
DEFAULT_RANGE_1 = range(DEFAULT_START_ID, DEFAULT_END_ID)
DEFAULT_RANGE_2 = range(15793473, 15798807)
DEFAULT_RANGE_IDS = itertools.chain(DEFAULT_RANGE_1, DEFAULT_RANGE_2)
DEFAULT_DIR_HTML = 'data/ulrich/html/'
DEFAULT_MAX_ATTEMPTS = 5
DEFAULT_MODE = 'collect'
DEFAULT_NUM_THREADS = 4
DEFAULT_SEMAPHORE_LIMIT = 2
DEFAULT_ATTRS = {'bd_Title', 'bd_ISSN', 'bd_Format', 'bd_Frequency',
'bd_Country'}
def _find_all_tr_pairs(key: str, title_details, profile_id):
try:
return title_details.find('div', {'id': key}).find('table', {
'class': 'resultsTable'}).find_all('tr')
except AttributeError:
        logging.warning('ID %s (KEY) %s does not have resultsTable' % (
profile_id, key))
def _split_journal_attrs(attrs):
if attrs:
return [t.text.replace(':', '').strip().split('\n') for t in [k for
k in attrs if isinstance(k, bs4.element.Tag)]]
return []
def _get_title_history(history_attrs):
all_td = []
if history_attrs:
for h in history_attrs:
all_td.extend(h.find_all('td'))
if len(all_td) > 0:
return '#'.join([''.join([a.strip() for a in k.text.split('\n')]) for
k in all_td if isinstance(k, bs4.element.Tag)])
return ''
def _get_pair_key_values(splitted_attrs, prefix: str):
tmp_dict = {}
for j in splitted_attrs:
tmp_dict[prefix + j[0].replace('\t', ' ')] = '#'.join([k.strip().
replace('\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(
) != ''])
return tmp_dict
def html2dict(path_zip_file: str):
"""
Open, reads and converts a zipped html into a dict.
:param path_zip_file: path of the zip file
:return: a dict where each key is the profile id and the value is its key-value pairs (attrs)
"""
profile_id = path_zip_file.split('/')[-1].split('.')[0]
inner_html_path = 'data/ulrich/html/' + profile_id + '.html'
html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()
parsed_data = [profile_id]
soupped_html = BeautifulSoup(html_content, 'html.parser')
title_details = soupped_html.find('div', {'id': 'resultPane'})
basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',
title_details, profile_id)
title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',
title_details, profile_id)
bd_splitted = _split_journal_attrs(basic_description_attrs)
dict_bd = _get_pair_key_values(bd_splitted, 'bd_')
title_history = _get_title_history(title_history_attrs)
for k in sorted(DEFAULT_ATTRS):
parsed_data.append(dict_bd.get(k, ''))
parsed_data.append(title_history)
return parsed_data
def save_tsv_file(parsed_data):
"""
Save a parsed journal to a tsv file
:param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes
"""
result_file.write('\t'.join(parsed_data) + '\n')
def save_into_html_file(path_html_file: str, response):
"""
Receives a response (in text format).
Saves the document into a html file.
"""
html_file = open(path_html_file, 'w')
html_file.writelines(response)
html_file.close()
with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:
zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)
zf.close()
os.remove(path_html_file)
async def fetch(url, session):
"""
Fetches the url.
Calls the method save_into_html_file with the response as a parameter (in text format).
"""
try:
async with session.get(url) as response:
profile_id = url.split('/')[-1]
print('COLLECTING %s' % profile_id)
for attempt in range(DEFAULT_MAX_ATTEMPTS):
try:
if response.status == 200:
response = await response.text(errors='ignore')
save_into_html_file(DEFAULT_DIR_HTML + profile_id +
'.html', response)
logging.info('COLLECTED: %s' % profile_id)
break
                    elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS - 1:  # last attempt: range() never reaches DEFAULT_MAX_ATTEMPTS
logging.info('RESPONSE_ERROR_500: %s' % profile_id)
elif response.status == 404:
logging.info('RESPONSE_ERROR_404: %s' % profile_id)
except ServerDisconnectedError:
logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('TIMEOUT_ERROR: %s' % profile_id)
except ContentTypeError:
logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('GENERALIZED_TIMEOUT_ERROR')
except ClientConnectionError:
logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')
except ServerDisconnectedError:
logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')
except ContentTypeError:
logging.info('GENERALIZED_CONTENT_TYPE_ERROR')
async def bound_fetch(sem, url, session):
"""
Limits the collecting task to a semaphore.
"""
async with sem:
await fetch(url, session)
async def run():
"""
Creates tasks to get the html file with respect to a list composed by htmls.
"""
sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)
tasks = []
async with ClientSession() as session:
for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:
task = asyncio.ensure_future(bound_fetch(sem, u, session))
tasks.append(task)
responses = asyncio.gather(*tasks)
await responses
if __name__ == '__main__':
logging.basicConfig(filename='ulrich.log', level=logging.INFO, format=
'%(asctime)s - %(levelname)s - %(message)s')
MODE = sys.argv[1]
DIR_HTML = sys.argv[2]
if MODE == 'collect':
DEFAULT_DIR_HTML = DIR_HTML
os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)
if len(sys.argv) == 4:
start_id = int(sys.argv[3])
DEFAULT_RANGE_IDS = itertools.chain(range(start_id,
DEFAULT_END_ID), DEFAULT_RANGE_2)
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run())
loop.run_until_complete(future)
elif MODE == 'parse':
DEFAULT_DIR_HTML = DIR_HTML
START = int(sys.argv[3])
END = int(sys.argv[4])
if END > len(os.listdir(DEFAULT_DIR_HTML)):
END = len(os.listdir(DEFAULT_DIR_HTML))
htmls = sorted([(DEFAULT_DIR_HTML + h) for h in os.listdir(DIR_HTML)])[
START:END]
result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')
result_file.write('\t'.join(['Profile Identifier'] + sorted(
DEFAULT_ATTRS) + ['title_history']) + '\n')
for i, h in enumerate(sorted(htmls)):
print('\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')
parsed = html2dict(h)
save_tsv_file(parsed)
result_file.close()
<|reserved_special_token_1|>
import asyncio
import bs4
import itertools
import logging
import sys
import os
import zipfile
from asyncio import TimeoutError
from aiohttp import ClientSession, ClientConnectionError
from aiohttp.client_exceptions import ContentTypeError, ServerDisconnectedError
from bs4 import BeautifulSoup
ROOT_URL = 'https://ulrichsweb.serialssolutions.com/titleDetails/{}'
DEFAULT_START_ID = 12515
DEFAULT_END_ID = 835018
DEFAULT_RANGE_1 = range(DEFAULT_START_ID, DEFAULT_END_ID)
DEFAULT_RANGE_2 = range(15793473, 15798807)
DEFAULT_RANGE_IDS = itertools.chain(DEFAULT_RANGE_1, DEFAULT_RANGE_2)
DEFAULT_DIR_HTML = 'data/ulrich/html/'
DEFAULT_MAX_ATTEMPTS = 5
DEFAULT_MODE = 'collect'
DEFAULT_NUM_THREADS = 4
DEFAULT_SEMAPHORE_LIMIT = 2
DEFAULT_ATTRS = {'bd_Title', 'bd_ISSN', 'bd_Format', 'bd_Frequency',
'bd_Country'}
def _find_all_tr_pairs(key: str, title_details, profile_id):
try:
return title_details.find('div', {'id': key}).find('table', {
'class': 'resultsTable'}).find_all('tr')
except AttributeError:
        logging.warning('ID %s (KEY) %s does not have resultsTable' % (
profile_id, key))
def _split_journal_attrs(attrs):
if attrs:
return [t.text.replace(':', '').strip().split('\n') for t in [k for
k in attrs if isinstance(k, bs4.element.Tag)]]
return []
def _get_title_history(history_attrs):
all_td = []
if history_attrs:
for h in history_attrs:
all_td.extend(h.find_all('td'))
if len(all_td) > 0:
return '#'.join([''.join([a.strip() for a in k.text.split('\n')]) for
k in all_td if isinstance(k, bs4.element.Tag)])
return ''
def _get_pair_key_values(splitted_attrs, prefix: str):
tmp_dict = {}
for j in splitted_attrs:
tmp_dict[prefix + j[0].replace('\t', ' ')] = '#'.join([k.strip().
replace('\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(
) != ''])
return tmp_dict
def html2dict(path_zip_file: str):
"""
Open, reads and converts a zipped html into a dict.
:param path_zip_file: path of the zip file
:return: a dict where each key is the profile id and the value is its key-value pairs (attrs)
"""
profile_id = path_zip_file.split('/')[-1].split('.')[0]
inner_html_path = 'data/ulrich/html/' + profile_id + '.html'
html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()
parsed_data = [profile_id]
soupped_html = BeautifulSoup(html_content, 'html.parser')
title_details = soupped_html.find('div', {'id': 'resultPane'})
basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',
title_details, profile_id)
title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',
title_details, profile_id)
bd_splitted = _split_journal_attrs(basic_description_attrs)
dict_bd = _get_pair_key_values(bd_splitted, 'bd_')
title_history = _get_title_history(title_history_attrs)
for k in sorted(DEFAULT_ATTRS):
parsed_data.append(dict_bd.get(k, ''))
parsed_data.append(title_history)
return parsed_data
def save_tsv_file(parsed_data):
"""
Save a parsed journal to a tsv file
:param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes
"""
result_file.write('\t'.join(parsed_data) + '\n')
def save_into_html_file(path_html_file: str, response):
"""
Receives a response (in text format).
Saves the document into a html file.
"""
html_file = open(path_html_file, 'w')
html_file.writelines(response)
html_file.close()
with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:
zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)
zf.close()
os.remove(path_html_file)
async def fetch(url, session):
"""
Fetches the url.
Calls the method save_into_html_file with the response as a parameter (in text format).
"""
try:
async with session.get(url) as response:
profile_id = url.split('/')[-1]
print('COLLECTING %s' % profile_id)
for attempt in range(DEFAULT_MAX_ATTEMPTS):
try:
if response.status == 200:
response = await response.text(errors='ignore')
save_into_html_file(DEFAULT_DIR_HTML + profile_id +
'.html', response)
logging.info('COLLECTED: %s' % profile_id)
break
                    elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS - 1:  # last attempt: range() never reaches DEFAULT_MAX_ATTEMPTS
logging.info('RESPONSE_ERROR_500: %s' % profile_id)
elif response.status == 404:
logging.info('RESPONSE_ERROR_404: %s' % profile_id)
except ServerDisconnectedError:
logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('TIMEOUT_ERROR: %s' % profile_id)
except ContentTypeError:
logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('GENERALIZED_TIMEOUT_ERROR')
except ClientConnectionError:
logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')
except ServerDisconnectedError:
logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')
except ContentTypeError:
logging.info('GENERALIZED_CONTENT_TYPE_ERROR')
async def bound_fetch(sem, url, session):
"""
Limits the collecting task to a semaphore.
"""
async with sem:
await fetch(url, session)
async def run():
"""
Creates tasks to get the html file with respect to a list composed by htmls.
"""
sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)
tasks = []
async with ClientSession() as session:
for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:
task = asyncio.ensure_future(bound_fetch(sem, u, session))
tasks.append(task)
responses = asyncio.gather(*tasks)
await responses
if __name__ == '__main__':
logging.basicConfig(filename='ulrich.log', level=logging.INFO, format=
'%(asctime)s - %(levelname)s - %(message)s')
MODE = sys.argv[1]
DIR_HTML = sys.argv[2]
if MODE == 'collect':
DEFAULT_DIR_HTML = DIR_HTML
os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)
if len(sys.argv) == 4:
start_id = int(sys.argv[3])
DEFAULT_RANGE_IDS = itertools.chain(range(start_id,
DEFAULT_END_ID), DEFAULT_RANGE_2)
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run())
loop.run_until_complete(future)
elif MODE == 'parse':
DEFAULT_DIR_HTML = DIR_HTML
START = int(sys.argv[3])
END = int(sys.argv[4])
if END > len(os.listdir(DEFAULT_DIR_HTML)):
END = len(os.listdir(DEFAULT_DIR_HTML))
htmls = sorted([(DEFAULT_DIR_HTML + h) for h in os.listdir(DIR_HTML)])[
START:END]
result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')
result_file.write('\t'.join(['Profile Identifier'] + sorted(
DEFAULT_ATTRS) + ['title_history']) + '\n')
for i, h in enumerate(sorted(htmls)):
print('\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')
parsed = html2dict(h)
save_tsv_file(parsed)
result_file.close()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import asyncio
import bs4
import itertools
import logging
import sys
import os
import zipfile
from asyncio import TimeoutError
from aiohttp import ClientSession, ClientConnectionError
from aiohttp.client_exceptions import ContentTypeError, ServerDisconnectedError
from bs4 import BeautifulSoup
ROOT_URL = 'https://ulrichsweb.serialssolutions.com/titleDetails/{}'
DEFAULT_START_ID = 12515
DEFAULT_END_ID = 835018
DEFAULT_RANGE_1 = range(DEFAULT_START_ID, DEFAULT_END_ID)
DEFAULT_RANGE_2 = range(15793473, 15798807)
DEFAULT_RANGE_IDS = itertools.chain(DEFAULT_RANGE_1, DEFAULT_RANGE_2)
DEFAULT_DIR_HTML = 'data/ulrich/html/'
DEFAULT_MAX_ATTEMPTS = 5
DEFAULT_MODE = 'collect'
DEFAULT_NUM_THREADS = 4
DEFAULT_SEMAPHORE_LIMIT = 2
DEFAULT_ATTRS = {'bd_Title', 'bd_ISSN', 'bd_Format', 'bd_Frequency', 'bd_Country'}
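# The 'bd_' prefix matches the prefix that html2dict passes to _get_pair_key_values;
# sorting DEFAULT_ATTRS fixes the column order of the output TSV.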
def _find_all_tr_pairs(key: str, title_details, profile_id):
try:
return title_details.find('div', {'id': key}).find('table', {'class': 'resultsTable'}).find_all('tr')
except AttributeError:
        logging.warning('ID %s (KEY) %s does not have resultsTable' % (profile_id, key))
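# Note: on AttributeError this function implicitly returns None; the helpers below
# guard against that with truthiness checks on their arguments.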
def _split_journal_attrs(attrs):
if attrs:
return [t.text.replace(':', '').strip().split('\n') for t in
[k for k in attrs if isinstance(k, bs4.element.Tag)]]
return []
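# Keeps only Tag nodes, drops the ':' separators, and splits each row's text into lines.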
def _get_title_history(history_attrs):
all_td = []
if history_attrs:
for h in history_attrs:
all_td.extend(h.find_all('td'))
if len(all_td) > 0:
return '#'.join([''.join([a.strip() for a in k.text.split('\n')]) for k in all_td if isinstance(k, bs4.element.Tag)])
return ''
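# Flattens every <td> of the title-history rows into a single '#'-separated string,
# collapsing internal newlines; returns '' when there is no history table.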
def _get_pair_key_values(splitted_attrs, prefix: str):
tmp_dict = {}
for j in splitted_attrs:
tmp_dict[prefix + j[0].replace('\t', ' ')] = '#'.join(
[k.strip().replace('\t', ' ').replace('#', ' ') for k in j[1:] if k.strip() != ''])
return tmp_dict
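# For example, with prefix 'bd_', [['Title', 'Foo', ''], ['ISSN', '1234-5678']]
# becomes {'bd_Title': 'Foo', 'bd_ISSN': '1234-5678'} (empty values are dropped).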
def html2dict(path_zip_file: str):
"""
Open, reads and converts a zipped html into a dict.
:param path_zip_file: path of the zip file
:return: a dict where each key is the profile id and the value is its key-value pairs (attrs)
"""
profile_id = path_zip_file.split('/')[-1].split('.')[0]
inner_html_path = 'data/ulrich/html/' + profile_id + '.html'
html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()
parsed_data = [profile_id]
soupped_html = BeautifulSoup(html_content, 'html.parser')
title_details = soupped_html.find('div', {'id': 'resultPane'})
basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer', title_details, profile_id)
title_history_attrs = _find_all_tr_pairs('titleHistoryContainer', title_details, profile_id)
bd_splitted = _split_journal_attrs(basic_description_attrs)
dict_bd = _get_pair_key_values(bd_splitted, 'bd_')
title_history = _get_title_history(title_history_attrs)
for k in sorted(DEFAULT_ATTRS):
parsed_data.append(dict_bd.get(k, ''))
parsed_data.append(title_history)
return parsed_data
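# A minimal usage sketch (the zip path is a hypothetical file produced by collect mode):
#   row = html2dict('data/ulrich/html/12515.zip')
#   # row == [profile_id, <sorted bd_ attribute values...>, title_history]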
def save_tsv_file(parsed_data):
"""
Save a parsed journal to a tsv file
:param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes
"""
result_file.write('\t'.join(parsed_data) + '\n')
def save_into_html_file(path_html_file: str, response):
"""
Receives a response (in text format).
Saves the document into a html file.
"""
html_file = open(path_html_file, 'w')
html_file.writelines(response)
html_file.close()
with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:
zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)
zf.close()
os.remove(path_html_file)
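# The HTML is written to disk first so zipfile can archive it; the plain .html file is
# then removed, leaving only the .zip (the explicit zf.close() inside the with block is
# redundant but harmless).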
async def fetch(url, session):
"""
Fetches the url.
Calls the method save_into_html_file with the response as a parameter (in text format).
"""
try:
async with session.get(url) as response:
profile_id = url.split('/')[-1]
print('COLLECTING %s' % profile_id)
for attempt in range(DEFAULT_MAX_ATTEMPTS):
try:
if response.status == 200:
response = await response.text(errors='ignore')
save_into_html_file(DEFAULT_DIR_HTML + profile_id + '.html', response)
logging.info('COLLECTED: %s' % profile_id)
break
                    elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS - 1:  # last attempt: range() never reaches DEFAULT_MAX_ATTEMPTS
logging.info('RESPONSE_ERROR_500: %s' % profile_id)
elif response.status == 404:
logging.info('RESPONSE_ERROR_404: %s' % profile_id)
except ServerDisconnectedError:
logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('TIMEOUT_ERROR: %s' % profile_id)
except ContentTypeError:
logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)
except TimeoutError:
logging.info('GENERALIZED_TIMEOUT_ERROR')
except ClientConnectionError:
logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')
except ServerDisconnectedError:
logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')
except ContentTypeError:
logging.info('GENERALIZED_CONTENT_TYPE_ERROR')
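# Caveat: the retry loop re-inspects the same response object instead of re-issuing
# the request, so the DEFAULT_MAX_ATTEMPTS iterations never actually refetch the URL.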
async def bound_fetch(sem, url, session):
"""
Limits the collecting task to a semaphore.
"""
async with sem:
await fetch(url, session)
async def run():
"""
Creates tasks to get the html file with respect to a list composed by htmls.
"""
sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)
tasks = []
async with ClientSession() as session:
for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:
task = asyncio.ensure_future(bound_fetch(sem, u, session))
tasks.append(task)
responses = asyncio.gather(*tasks)
await responses
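# DEFAULT_SEMAPHORE_LIMIT caps concurrency: every URL gets a task up front, but at
# most that many coroutines hold the semaphore (and hence a live request) at once.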
if __name__ == "__main__":
logging.basicConfig(filename='ulrich.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
MODE = sys.argv[1]
DIR_HTML = sys.argv[2]
if MODE == 'collect':
DEFAULT_DIR_HTML = DIR_HTML
os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)
if len(sys.argv) == 4:
start_id = int(sys.argv[3])
DEFAULT_RANGE_IDS = itertools.chain(range(start_id, DEFAULT_END_ID), DEFAULT_RANGE_2)
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run())
loop.run_until_complete(future)
elif MODE == 'parse':
DEFAULT_DIR_HTML = DIR_HTML
START = int(sys.argv[3])
END = int(sys.argv[4])
if END > len(os.listdir(DEFAULT_DIR_HTML)):
END = len(os.listdir(DEFAULT_DIR_HTML))
htmls = sorted([DEFAULT_DIR_HTML + h for h in os.listdir(DIR_HTML)])[START:END]
result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')
result_file.write('\t'.join(['Profile Identifier'] + sorted(DEFAULT_ATTRS) + ['title_history']) + '\n')
for i, h in enumerate(sorted(htmls)):
print('\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')
parsed = html2dict(h)
save_tsv_file(parsed)
result_file.close()
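# Assumed command-line usage (the script name is hypothetical; it is not given in the source):
#   python3 ulrich.py collect <html_dir> [start_id]
#   python3 ulrich.py parse <html_dir> <START> <END>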
|
flexible
|
{
"blob_id": "002f65fd77ce5043d1a0495ed13c15e3b4d2fb76",
"index": 7244,
"step-1": "<mask token>\n\n\ndef _split_journal_attrs(attrs):\n if attrs:\n return [t.text.replace(':', '').strip().split('\\n') for t in [k for\n k in attrs if isinstance(k, bs4.element.Tag)]]\n return []\n\n\ndef _get_title_history(history_attrs):\n all_td = []\n if history_attrs:\n for h in history_attrs:\n all_td.extend(h.find_all('td'))\n if len(all_td) > 0:\n return '#'.join([''.join([a.strip() for a in k.text.split('\\n')]) for\n k in all_td if isinstance(k, bs4.element.Tag)])\n return ''\n\n\ndef _get_pair_key_values(splitted_attrs, prefix: str):\n tmp_dict = {}\n for j in splitted_attrs:\n tmp_dict[prefix + j[0].replace('\\t', ' ')] = '#'.join([k.strip().\n replace('\\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(\n ) != ''])\n return tmp_dict\n\n\ndef html2dict(path_zip_file: str):\n \"\"\"\n Open, reads and converts a zipped html into a dict.\n :param path_zip_file: path of the zip file\n :return: a dict where each key is the profile id and the value is its key-value pairs (attrs)\n \"\"\"\n profile_id = path_zip_file.split('/')[-1].split('.')[0]\n inner_html_path = 'data/ulrich/html/' + profile_id + '.html'\n html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()\n parsed_data = [profile_id]\n soupped_html = BeautifulSoup(html_content, 'html.parser')\n title_details = soupped_html.find('div', {'id': 'resultPane'})\n basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',\n title_details, profile_id)\n title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',\n title_details, profile_id)\n bd_splitted = _split_journal_attrs(basic_description_attrs)\n dict_bd = _get_pair_key_values(bd_splitted, 'bd_')\n title_history = _get_title_history(title_history_attrs)\n for k in sorted(DEFAULT_ATTRS):\n parsed_data.append(dict_bd.get(k, ''))\n parsed_data.append(title_history)\n return parsed_data\n\n\ndef save_tsv_file(parsed_data):\n \"\"\"\n Save a parsed journal to a tsv file\n :param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes\n \"\"\"\n result_file.write('\\t'.join(parsed_data) + '\\n')\n\n\ndef save_into_html_file(path_html_file: str, response):\n \"\"\"\n Receives a response (in text format).\n Saves the document into a html file.\n \"\"\"\n html_file = open(path_html_file, 'w')\n html_file.writelines(response)\n html_file.close()\n with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:\n zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)\n zf.close()\n os.remove(path_html_file)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _find_all_tr_pairs(key: str, title_details, profile_id):\n try:\n return title_details.find('div', {'id': key}).find('table', {\n 'class': 'resultsTable'}).find_all('tr')\n except AttributeError:\n logging.warning('ID %s (KEY) %s doest not have resultsTable' % (\n profile_id, key))\n\n\ndef _split_journal_attrs(attrs):\n if attrs:\n return [t.text.replace(':', '').strip().split('\\n') for t in [k for\n k in attrs if isinstance(k, bs4.element.Tag)]]\n return []\n\n\ndef _get_title_history(history_attrs):\n all_td = []\n if history_attrs:\n for h in history_attrs:\n all_td.extend(h.find_all('td'))\n if len(all_td) > 0:\n return '#'.join([''.join([a.strip() for a in k.text.split('\\n')]) for\n k in all_td if isinstance(k, bs4.element.Tag)])\n return ''\n\n\ndef _get_pair_key_values(splitted_attrs, prefix: str):\n tmp_dict = {}\n for j in splitted_attrs:\n tmp_dict[prefix + j[0].replace('\\t', ' ')] = '#'.join([k.strip().\n replace('\\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(\n ) != ''])\n return tmp_dict\n\n\ndef html2dict(path_zip_file: str):\n \"\"\"\n Open, reads and converts a zipped html into a dict.\n :param path_zip_file: path of the zip file\n :return: a dict where each key is the profile id and the value is its key-value pairs (attrs)\n \"\"\"\n profile_id = path_zip_file.split('/')[-1].split('.')[0]\n inner_html_path = 'data/ulrich/html/' + profile_id + '.html'\n html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()\n parsed_data = [profile_id]\n soupped_html = BeautifulSoup(html_content, 'html.parser')\n title_details = soupped_html.find('div', {'id': 'resultPane'})\n basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',\n title_details, profile_id)\n title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',\n title_details, profile_id)\n bd_splitted = _split_journal_attrs(basic_description_attrs)\n dict_bd = _get_pair_key_values(bd_splitted, 'bd_')\n title_history = _get_title_history(title_history_attrs)\n for k in sorted(DEFAULT_ATTRS):\n parsed_data.append(dict_bd.get(k, ''))\n parsed_data.append(title_history)\n return parsed_data\n\n\ndef save_tsv_file(parsed_data):\n \"\"\"\n Save a parsed journal to a tsv file\n :param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes\n \"\"\"\n result_file.write('\\t'.join(parsed_data) + '\\n')\n\n\ndef save_into_html_file(path_html_file: str, response):\n \"\"\"\n Receives a response (in text format).\n Saves the document into a html file.\n \"\"\"\n html_file = open(path_html_file, 'w')\n html_file.writelines(response)\n html_file.close()\n with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:\n zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)\n zf.close()\n os.remove(path_html_file)\n\n\nasync def fetch(url, session):\n \"\"\"\n Fetches the url.\n Calls the method save_into_html_file with the response as a parameter (in text format).\n \"\"\"\n try:\n async with session.get(url) as response:\n profile_id = url.split('/')[-1]\n print('COLLECTING %s' % profile_id)\n for attempt in range(DEFAULT_MAX_ATTEMPTS):\n try:\n if response.status == 200:\n response = await response.text(errors='ignore')\n save_into_html_file(DEFAULT_DIR_HTML + profile_id +\n '.html', response)\n logging.info('COLLECTED: %s' % profile_id)\n break\n elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS:\n logging.info('RESPONSE_ERROR_500: %s' % profile_id)\n 
elif response.status == 404:\n logging.info('RESPONSE_ERROR_404: %s' % profile_id)\n except ServerDisconnectedError:\n logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('TIMEOUT_ERROR: %s' % profile_id)\n except ContentTypeError:\n logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('GENERALIZED_TIMEOUT_ERROR')\n except ClientConnectionError:\n logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')\n except ServerDisconnectedError:\n logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')\n except ContentTypeError:\n logging.info('GENERALIZED_CONTENT_TYPE_ERROR')\n\n\nasync def bound_fetch(sem, url, session):\n \"\"\"\n Limits the collecting task to a semaphore.\n \"\"\"\n async with sem:\n await fetch(url, session)\n\n\nasync def run():\n \"\"\"\n Creates tasks to get the html file with respect to a list composed by htmls.\n \"\"\"\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='ulrich.log', level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n MODE = sys.argv[1]\n DIR_HTML = sys.argv[2]\n if MODE == 'collect':\n DEFAULT_DIR_HTML = DIR_HTML\n os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)\n if len(sys.argv) == 4:\n start_id = int(sys.argv[3])\n DEFAULT_RANGE_IDS = itertools.chain(range(start_id,\n DEFAULT_END_ID), DEFAULT_RANGE_2)\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(run())\n loop.run_until_complete(future)\n elif MODE == 'parse':\n DEFAULT_DIR_HTML = DIR_HTML\n START = int(sys.argv[3])\n END = int(sys.argv[4])\n if END > len(os.listdir(DEFAULT_DIR_HTML)):\n END = len(os.listdir(DEFAULT_DIR_HTML))\n htmls = sorted([(DEFAULT_DIR_HTML + h) for h in os.listdir(DIR_HTML)])[\n START:END]\n result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')\n result_file.write('\\t'.join(['Profile Identifier'] + sorted(\n DEFAULT_ATTRS) + ['title_history']) + '\\n')\n for i, h in enumerate(sorted(htmls)):\n print('\\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')\n parsed = html2dict(h)\n save_tsv_file(parsed)\n result_file.close()\n",
"step-3": "<mask token>\nROOT_URL = 'https://ulrichsweb.serialssolutions.com/titleDetails/{}'\nDEFAULT_START_ID = 12515\nDEFAULT_END_ID = 835018\nDEFAULT_RANGE_1 = range(DEFAULT_START_ID, DEFAULT_END_ID)\nDEFAULT_RANGE_2 = range(15793473, 15798807)\nDEFAULT_RANGE_IDS = itertools.chain(DEFAULT_RANGE_1, DEFAULT_RANGE_2)\nDEFAULT_DIR_HTML = 'data/ulrich/html/'\nDEFAULT_MAX_ATTEMPTS = 5\nDEFAULT_MODE = 'collect'\nDEFAULT_NUM_THREADS = 4\nDEFAULT_SEMAPHORE_LIMIT = 2\nDEFAULT_ATTRS = {'bd_Title', 'bd_ISSN', 'bd_Format', 'bd_Frequency',\n 'bd_Country'}\n\n\ndef _find_all_tr_pairs(key: str, title_details, profile_id):\n try:\n return title_details.find('div', {'id': key}).find('table', {\n 'class': 'resultsTable'}).find_all('tr')\n except AttributeError:\n logging.warning('ID %s (KEY) %s doest not have resultsTable' % (\n profile_id, key))\n\n\ndef _split_journal_attrs(attrs):\n if attrs:\n return [t.text.replace(':', '').strip().split('\\n') for t in [k for\n k in attrs if isinstance(k, bs4.element.Tag)]]\n return []\n\n\ndef _get_title_history(history_attrs):\n all_td = []\n if history_attrs:\n for h in history_attrs:\n all_td.extend(h.find_all('td'))\n if len(all_td) > 0:\n return '#'.join([''.join([a.strip() for a in k.text.split('\\n')]) for\n k in all_td if isinstance(k, bs4.element.Tag)])\n return ''\n\n\ndef _get_pair_key_values(splitted_attrs, prefix: str):\n tmp_dict = {}\n for j in splitted_attrs:\n tmp_dict[prefix + j[0].replace('\\t', ' ')] = '#'.join([k.strip().\n replace('\\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(\n ) != ''])\n return tmp_dict\n\n\ndef html2dict(path_zip_file: str):\n \"\"\"\n Open, reads and converts a zipped html into a dict.\n :param path_zip_file: path of the zip file\n :return: a dict where each key is the profile id and the value is its key-value pairs (attrs)\n \"\"\"\n profile_id = path_zip_file.split('/')[-1].split('.')[0]\n inner_html_path = 'data/ulrich/html/' + profile_id + '.html'\n html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()\n parsed_data = [profile_id]\n soupped_html = BeautifulSoup(html_content, 'html.parser')\n title_details = soupped_html.find('div', {'id': 'resultPane'})\n basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',\n title_details, profile_id)\n title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',\n title_details, profile_id)\n bd_splitted = _split_journal_attrs(basic_description_attrs)\n dict_bd = _get_pair_key_values(bd_splitted, 'bd_')\n title_history = _get_title_history(title_history_attrs)\n for k in sorted(DEFAULT_ATTRS):\n parsed_data.append(dict_bd.get(k, ''))\n parsed_data.append(title_history)\n return parsed_data\n\n\ndef save_tsv_file(parsed_data):\n \"\"\"\n Save a parsed journal to a tsv file\n :param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes\n \"\"\"\n result_file.write('\\t'.join(parsed_data) + '\\n')\n\n\ndef save_into_html_file(path_html_file: str, response):\n \"\"\"\n Receives a response (in text format).\n Saves the document into a html file.\n \"\"\"\n html_file = open(path_html_file, 'w')\n html_file.writelines(response)\n html_file.close()\n with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:\n zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)\n zf.close()\n os.remove(path_html_file)\n\n\nasync def fetch(url, session):\n \"\"\"\n Fetches the url.\n Calls the method save_into_html_file with the response as a parameter (in 
text format).\n \"\"\"\n try:\n async with session.get(url) as response:\n profile_id = url.split('/')[-1]\n print('COLLECTING %s' % profile_id)\n for attempt in range(DEFAULT_MAX_ATTEMPTS):\n try:\n if response.status == 200:\n response = await response.text(errors='ignore')\n save_into_html_file(DEFAULT_DIR_HTML + profile_id +\n '.html', response)\n logging.info('COLLECTED: %s' % profile_id)\n break\n elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS:\n logging.info('RESPONSE_ERROR_500: %s' % profile_id)\n elif response.status == 404:\n logging.info('RESPONSE_ERROR_404: %s' % profile_id)\n except ServerDisconnectedError:\n logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('TIMEOUT_ERROR: %s' % profile_id)\n except ContentTypeError:\n logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('GENERALIZED_TIMEOUT_ERROR')\n except ClientConnectionError:\n logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')\n except ServerDisconnectedError:\n logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')\n except ContentTypeError:\n logging.info('GENERALIZED_CONTENT_TYPE_ERROR')\n\n\nasync def bound_fetch(sem, url, session):\n \"\"\"\n Limits the collecting task to a semaphore.\n \"\"\"\n async with sem:\n await fetch(url, session)\n\n\nasync def run():\n \"\"\"\n Creates tasks to get the html file with respect to a list composed by htmls.\n \"\"\"\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='ulrich.log', level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n MODE = sys.argv[1]\n DIR_HTML = sys.argv[2]\n if MODE == 'collect':\n DEFAULT_DIR_HTML = DIR_HTML\n os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)\n if len(sys.argv) == 4:\n start_id = int(sys.argv[3])\n DEFAULT_RANGE_IDS = itertools.chain(range(start_id,\n DEFAULT_END_ID), DEFAULT_RANGE_2)\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(run())\n loop.run_until_complete(future)\n elif MODE == 'parse':\n DEFAULT_DIR_HTML = DIR_HTML\n START = int(sys.argv[3])\n END = int(sys.argv[4])\n if END > len(os.listdir(DEFAULT_DIR_HTML)):\n END = len(os.listdir(DEFAULT_DIR_HTML))\n htmls = sorted([(DEFAULT_DIR_HTML + h) for h in os.listdir(DIR_HTML)])[\n START:END]\n result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')\n result_file.write('\\t'.join(['Profile Identifier'] + sorted(\n DEFAULT_ATTRS) + ['title_history']) + '\\n')\n for i, h in enumerate(sorted(htmls)):\n print('\\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')\n parsed = html2dict(h)\n save_tsv_file(parsed)\n result_file.close()\n",
"step-4": "import asyncio\nimport bs4\nimport itertools\nimport logging\nimport sys\nimport os\nimport zipfile\nfrom asyncio import TimeoutError\nfrom aiohttp import ClientSession, ClientConnectionError\nfrom aiohttp.client_exceptions import ContentTypeError, ServerDisconnectedError\nfrom bs4 import BeautifulSoup\nROOT_URL = 'https://ulrichsweb.serialssolutions.com/titleDetails/{}'\nDEFAULT_START_ID = 12515\nDEFAULT_END_ID = 835018\nDEFAULT_RANGE_1 = range(DEFAULT_START_ID, DEFAULT_END_ID)\nDEFAULT_RANGE_2 = range(15793473, 15798807)\nDEFAULT_RANGE_IDS = itertools.chain(DEFAULT_RANGE_1, DEFAULT_RANGE_2)\nDEFAULT_DIR_HTML = 'data/ulrich/html/'\nDEFAULT_MAX_ATTEMPTS = 5\nDEFAULT_MODE = 'collect'\nDEFAULT_NUM_THREADS = 4\nDEFAULT_SEMAPHORE_LIMIT = 2\nDEFAULT_ATTRS = {'bd_Title', 'bd_ISSN', 'bd_Format', 'bd_Frequency',\n 'bd_Country'}\n\n\ndef _find_all_tr_pairs(key: str, title_details, profile_id):\n try:\n return title_details.find('div', {'id': key}).find('table', {\n 'class': 'resultsTable'}).find_all('tr')\n except AttributeError:\n logging.warning('ID %s (KEY) %s doest not have resultsTable' % (\n profile_id, key))\n\n\ndef _split_journal_attrs(attrs):\n if attrs:\n return [t.text.replace(':', '').strip().split('\\n') for t in [k for\n k in attrs if isinstance(k, bs4.element.Tag)]]\n return []\n\n\ndef _get_title_history(history_attrs):\n all_td = []\n if history_attrs:\n for h in history_attrs:\n all_td.extend(h.find_all('td'))\n if len(all_td) > 0:\n return '#'.join([''.join([a.strip() for a in k.text.split('\\n')]) for\n k in all_td if isinstance(k, bs4.element.Tag)])\n return ''\n\n\ndef _get_pair_key_values(splitted_attrs, prefix: str):\n tmp_dict = {}\n for j in splitted_attrs:\n tmp_dict[prefix + j[0].replace('\\t', ' ')] = '#'.join([k.strip().\n replace('\\t', ' ').replace('#', ' ') for k in j[1:] if k.strip(\n ) != ''])\n return tmp_dict\n\n\ndef html2dict(path_zip_file: str):\n \"\"\"\n Open, reads and converts a zipped html into a dict.\n :param path_zip_file: path of the zip file\n :return: a dict where each key is the profile id and the value is its key-value pairs (attrs)\n \"\"\"\n profile_id = path_zip_file.split('/')[-1].split('.')[0]\n inner_html_path = 'data/ulrich/html/' + profile_id + '.html'\n html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()\n parsed_data = [profile_id]\n soupped_html = BeautifulSoup(html_content, 'html.parser')\n title_details = soupped_html.find('div', {'id': 'resultPane'})\n basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer',\n title_details, profile_id)\n title_history_attrs = _find_all_tr_pairs('titleHistoryContainer',\n title_details, profile_id)\n bd_splitted = _split_journal_attrs(basic_description_attrs)\n dict_bd = _get_pair_key_values(bd_splitted, 'bd_')\n title_history = _get_title_history(title_history_attrs)\n for k in sorted(DEFAULT_ATTRS):\n parsed_data.append(dict_bd.get(k, ''))\n parsed_data.append(title_history)\n return parsed_data\n\n\ndef save_tsv_file(parsed_data):\n \"\"\"\n Save a parsed journal to a tsv file\n :param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes\n \"\"\"\n result_file.write('\\t'.join(parsed_data) + '\\n')\n\n\ndef save_into_html_file(path_html_file: str, response):\n \"\"\"\n Receives a response (in text format).\n Saves the document into a html file.\n \"\"\"\n html_file = open(path_html_file, 'w')\n html_file.writelines(response)\n html_file.close()\n with 
zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:\n zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)\n zf.close()\n os.remove(path_html_file)\n\n\nasync def fetch(url, session):\n \"\"\"\n Fetches the url.\n Calls the method save_into_html_file with the response as a parameter (in text format).\n \"\"\"\n try:\n async with session.get(url) as response:\n profile_id = url.split('/')[-1]\n print('COLLECTING %s' % profile_id)\n for attempt in range(DEFAULT_MAX_ATTEMPTS):\n try:\n if response.status == 200:\n response = await response.text(errors='ignore')\n save_into_html_file(DEFAULT_DIR_HTML + profile_id +\n '.html', response)\n logging.info('COLLECTED: %s' % profile_id)\n break\n elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS:\n logging.info('RESPONSE_ERROR_500: %s' % profile_id)\n elif response.status == 404:\n logging.info('RESPONSE_ERROR_404: %s' % profile_id)\n except ServerDisconnectedError:\n logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('TIMEOUT_ERROR: %s' % profile_id)\n except ContentTypeError:\n logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('GENERALIZED_TIMEOUT_ERROR')\n except ClientConnectionError:\n logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')\n except ServerDisconnectedError:\n logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')\n except ContentTypeError:\n logging.info('GENERALIZED_CONTENT_TYPE_ERROR')\n\n\nasync def bound_fetch(sem, url, session):\n \"\"\"\n Limits the collecting task to a semaphore.\n \"\"\"\n async with sem:\n await fetch(url, session)\n\n\nasync def run():\n \"\"\"\n Creates tasks to get the html file with respect to a list composed by htmls.\n \"\"\"\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='ulrich.log', level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n MODE = sys.argv[1]\n DIR_HTML = sys.argv[2]\n if MODE == 'collect':\n DEFAULT_DIR_HTML = DIR_HTML\n os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)\n if len(sys.argv) == 4:\n start_id = int(sys.argv[3])\n DEFAULT_RANGE_IDS = itertools.chain(range(start_id,\n DEFAULT_END_ID), DEFAULT_RANGE_2)\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(run())\n loop.run_until_complete(future)\n elif MODE == 'parse':\n DEFAULT_DIR_HTML = DIR_HTML\n START = int(sys.argv[3])\n END = int(sys.argv[4])\n if END > len(os.listdir(DEFAULT_DIR_HTML)):\n END = len(os.listdir(DEFAULT_DIR_HTML))\n htmls = sorted([(DEFAULT_DIR_HTML + h) for h in os.listdir(DIR_HTML)])[\n START:END]\n result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')\n result_file.write('\\t'.join(['Profile Identifier'] + sorted(\n DEFAULT_ATTRS) + ['title_history']) + '\\n')\n for i, h in enumerate(sorted(htmls)):\n print('\\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')\n parsed = html2dict(h)\n save_tsv_file(parsed)\n result_file.close()\n",
"step-5": "#!/usr/bin/env python3\nimport asyncio\n\nimport bs4\nimport itertools\nimport logging\nimport sys\nimport os\nimport zipfile\n\nfrom asyncio import TimeoutError\nfrom aiohttp import ClientSession, ClientConnectionError\nfrom aiohttp.client_exceptions import ContentTypeError, ServerDisconnectedError\nfrom bs4 import BeautifulSoup\n\nROOT_URL = 'https://ulrichsweb.serialssolutions.com/titleDetails/{}'\n\nDEFAULT_START_ID = 12515\nDEFAULT_END_ID = 835018\nDEFAULT_RANGE_1 = range(DEFAULT_START_ID, DEFAULT_END_ID)\nDEFAULT_RANGE_2 = range(15793473, 15798807)\nDEFAULT_RANGE_IDS = itertools.chain(DEFAULT_RANGE_1, DEFAULT_RANGE_2)\n\nDEFAULT_DIR_HTML = 'data/ulrich/html/'\n\nDEFAULT_MAX_ATTEMPTS = 5\nDEFAULT_MODE = 'collect'\nDEFAULT_NUM_THREADS = 4\nDEFAULT_SEMAPHORE_LIMIT = 2\n\nDEFAULT_ATTRS = {'bd_Title', 'bd_ISSN', 'bd_Format', 'bd_Frequency', 'bd_Country'}\n\n\ndef _find_all_tr_pairs(key: str, title_details, profile_id):\n try:\n return title_details.find('div', {'id': key}).find('table', {'class': 'resultsTable'}).find_all('tr')\n except AttributeError:\n logging.warning('ID %s (KEY) %s doest not have resultsTable' % (profile_id, key))\n\n\ndef _split_journal_attrs(attrs):\n if attrs:\n return [t.text.replace(':', '').strip().split('\\n') for t in\n [k for k in attrs if isinstance(k, bs4.element.Tag)]]\n return []\n\n\ndef _get_title_history(history_attrs):\n all_td = []\n if history_attrs:\n for h in history_attrs:\n all_td.extend(h.find_all('td'))\n if len(all_td) > 0:\n return '#'.join([''.join([a.strip() for a in k.text.split('\\n')]) for k in all_td if isinstance(k, bs4.element.Tag)])\n return ''\n\n\ndef _get_pair_key_values(splitted_attrs, prefix: str):\n tmp_dict = {}\n for j in splitted_attrs:\n tmp_dict[prefix + j[0].replace('\\t', ' ')] = '#'.join(\n [k.strip().replace('\\t', ' ').replace('#', ' ') for k in j[1:] if k.strip() != ''])\n return tmp_dict\n\n\ndef html2dict(path_zip_file: str):\n \"\"\"\n Open, reads and converts a zipped html into a dict.\n :param path_zip_file: path of the zip file\n :return: a dict where each key is the profile id and the value is its key-value pairs (attrs)\n \"\"\"\n profile_id = path_zip_file.split('/')[-1].split('.')[0]\n inner_html_path = 'data/ulrich/html/' + profile_id + '.html'\n html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()\n\n parsed_data = [profile_id]\n\n soupped_html = BeautifulSoup(html_content, 'html.parser')\n\n title_details = soupped_html.find('div', {'id': 'resultPane'})\n basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer', title_details, profile_id)\n title_history_attrs = _find_all_tr_pairs('titleHistoryContainer', title_details, profile_id)\n bd_splitted = _split_journal_attrs(basic_description_attrs)\n dict_bd = _get_pair_key_values(bd_splitted, 'bd_')\n title_history = _get_title_history(title_history_attrs)\n\n for k in sorted(DEFAULT_ATTRS):\n parsed_data.append(dict_bd.get(k, ''))\n\n parsed_data.append(title_history)\n\n return parsed_data\n\n\ndef save_tsv_file(parsed_data):\n \"\"\"\n Save a parsed journal to a tsv file\n :param parsed_data: a list of dictionaries where the only main key is a profile_id and its value is the pairs of journal's attributes\n \"\"\"\n result_file.write('\\t'.join(parsed_data) + '\\n')\n\n\ndef save_into_html_file(path_html_file: str, response):\n \"\"\"\n Receives a response (in text format).\n Saves the document into a html file.\n \"\"\"\n html_file = open(path_html_file, 'w')\n html_file.writelines(response)\n 
html_file.close()\n\n with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:\n zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)\n zf.close()\n os.remove(path_html_file)\n\n\nasync def fetch(url, session):\n \"\"\"\n Fetches the url.\n Calls the method save_into_html_file with the response as a parameter (in text format).\n \"\"\"\n try:\n async with session.get(url) as response:\n profile_id = url.split('/')[-1]\n print('COLLECTING %s' % profile_id)\n for attempt in range(DEFAULT_MAX_ATTEMPTS):\n try:\n if response.status == 200:\n response = await response.text(errors='ignore')\n save_into_html_file(DEFAULT_DIR_HTML + profile_id + '.html', response)\n logging.info('COLLECTED: %s' % profile_id)\n break\n elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS:\n logging.info('RESPONSE_ERROR_500: %s' % profile_id)\n elif response.status == 404:\n logging.info('RESPONSE_ERROR_404: %s' % profile_id)\n except ServerDisconnectedError:\n logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('TIMEOUT_ERROR: %s' % profile_id)\n except ContentTypeError:\n logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)\n except TimeoutError:\n logging.info('GENERALIZED_TIMEOUT_ERROR')\n except ClientConnectionError:\n logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')\n except ServerDisconnectedError:\n logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')\n except ContentTypeError:\n logging.info('GENERALIZED_CONTENT_TYPE_ERROR')\n\n\nasync def bound_fetch(sem, url, session):\n \"\"\"\n Limits the collecting task to a semaphore.\n \"\"\"\n async with sem:\n await fetch(url, session)\n\n\nasync def run():\n \"\"\"\n Creates tasks to get the html file with respect to a list composed by htmls.\n \"\"\"\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(filename='ulrich.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\n MODE = sys.argv[1]\n DIR_HTML = sys.argv[2]\n\n if MODE == 'collect':\n DEFAULT_DIR_HTML = DIR_HTML\n os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)\n\n if len(sys.argv) == 4:\n start_id = int(sys.argv[3])\n DEFAULT_RANGE_IDS = itertools.chain(range(start_id, DEFAULT_END_ID), DEFAULT_RANGE_2)\n\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(run())\n loop.run_until_complete(future)\n elif MODE == 'parse':\n DEFAULT_DIR_HTML = DIR_HTML\n\n START = int(sys.argv[3])\n END = int(sys.argv[4])\n\n if END > len(os.listdir(DEFAULT_DIR_HTML)):\n END = len(os.listdir(DEFAULT_DIR_HTML))\n\n htmls = sorted([DEFAULT_DIR_HTML + h for h in os.listdir(DIR_HTML)])[START:END]\n\n result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')\n result_file.write('\\t'.join(['Profile Identifier'] + sorted(DEFAULT_ATTRS) + ['title_history']) + '\\n')\n\n for i, h in enumerate(sorted(htmls)):\n print('\\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')\n parsed = html2dict(h)\n save_tsv_file(parsed)\n result_file.close()\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
wd.get('https://www.baidu.com/')
wd.find_element_by_id('kw').send_keys(u'哈哈')
wd.quit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
wd = webdriver.Firefox()
wd.get('https://www.baidu.com/')
wd.find_element_by_id('kw').send_keys(u'哈哈')
wd.quit()
<|reserved_special_token_1|>
from selenium import webdriver
wd = webdriver.Firefox()
wd.get('https://www.baidu.com/')
wd.find_element_by_id('kw').send_keys(u'哈哈')
wd.quit()
<|reserved_special_token_1|>
#coding=utf-8
from selenium import webdriver
wd=webdriver.Firefox()
wd.get('https://www.baidu.com/')
wd.find_element_by_id('kw').send_keys(u'哈哈')
wd.quit()
|
flexible
|
{
"blob_id": "8de36400f21bfb4e24703d5a65471a961e1afddc",
"index": 9796,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwd.get('https://www.baidu.com/')\nwd.find_element_by_id('kw').send_keys(u'哈哈')\nwd.quit()\n",
"step-3": "<mask token>\nwd = webdriver.Firefox()\nwd.get('https://www.baidu.com/')\nwd.find_element_by_id('kw').send_keys(u'哈哈')\nwd.quit()\n",
"step-4": "from selenium import webdriver\nwd = webdriver.Firefox()\nwd.get('https://www.baidu.com/')\nwd.find_element_by_id('kw').send_keys(u'哈哈')\nwd.quit()\n",
"step-5": "#coding=utf-8\n\nfrom selenium import webdriver\n\nwd=webdriver.Firefox()\nwd.get('https://www.baidu.com/')\nwd.find_element_by_id('kw').send_keys(u'哈哈')\n\nwd.quit()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
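The Baidu snippet above sends keys immediately after navigation, which can race the page load. A sketch of the same flow with an explicit wait (Selenium 4 locator style; only the element id 'kw' and URL are taken from the record):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wd = webdriver.Firefox()
try:
    wd.get('https://www.baidu.com/')
    # wait up to 10 seconds for the search box to appear before typing
    box = WebDriverWait(wd, 10).until(
        EC.presence_of_element_located((By.ID, 'kw')))
    box.send_keys(u'哈哈')
finally:
    wd.quit()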
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_status_name(status):
return '[%d]%s' % (status, statuses[status]['name'])
<|reserved_special_token_1|>
__author__ = 'virtual'
statuses = {None: {'name': 'None'}, (-1): {'name': 'unknown'}, (0): {'name':
''}, (1): {'name': 'Новый'}, (2): {'name': ''}, (3): {'name':
'Активный'}, (4): {'name': 'Приостановленный'}, (5): {'name':
'Заблокированный'}, (6): {'name': 'Удаленный'}, (7): {'name':
'Закрытый'}, (8): {'name': ''}}
def get_status_name(status):
return '[%d]%s' % (status, statuses[status]['name'])
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
__author__ = 'virtual'
statuses = {
None: {'name': 'None', },
-1: { 'name': 'unknown', },
0: { 'name': '',},
1: { 'name': 'Новый',},
2: { 'name': '',},
3: { 'name': 'Активный', },
4: { 'name': 'Приостановленный',},
5: { 'name': 'Заблокированный', },
6: { 'name': 'Удаленный', },
7: { 'name': 'Закрытый', },
8: { 'name': '', },
}
def get_status_name(status):
return '[%d]%s' % (status, statuses[status]['name'], )
|
flexible
|
{
"blob_id": "a847fc32af2602db3b5545c15186c0209eb8ae8d",
"index": 4008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'])\n",
"step-3": "__author__ = 'virtual'\nstatuses = {None: {'name': 'None'}, (-1): {'name': 'unknown'}, (0): {'name':\n ''}, (1): {'name': 'Новый'}, (2): {'name': ''}, (3): {'name':\n 'Активный'}, (4): {'name': 'Приостановленный'}, (5): {'name':\n 'Заблокированный'}, (6): {'name': 'Удаленный'}, (7): {'name':\n 'Закрытый'}, (8): {'name': ''}}\n\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'])\n",
"step-4": "# -*- coding: utf-8 -*-\n\n__author__ = 'virtual'\n\n\nstatuses = {\n None: {'name': 'None', },\n -1: { 'name': 'unknown', },\n 0: { 'name': '',},\n 1: { 'name': 'Новый',},\n 2: { 'name': '',},\n 3: { 'name': 'Активный', },\n 4: { 'name': 'Приостановленный',},\n 5: { 'name': 'Заблокированный', },\n 6: { 'name': 'Удаленный', },\n 7: { 'name': 'Закрытый', },\n 8: { 'name': '', },\n}\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'], )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
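get_status_name above raises KeyError for ids outside the table, and '%d' fails for the None key. A defensive variant, assuming the same statuses dict from the record:

def get_status_name_safe(status):
    info = statuses.get(status, statuses[-1])  # fall back to the 'unknown' entry
    return '[%s]%s' % (status, info['name'])   # %s also handles the None key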
#program_skeleton.py
#import load_json_files as bm
import write
import merge as m
import load_df as ldf
import load_vars as lv
import log as log
import clean_df as clean
import download as dl
import gc
import confirm_drcts as cfs
import fix_files as ff
import readwrite as rw
import df_filter as df_f
import realtor_scraper_sheets_3 as scraper
import get_creds as creds
import goog_sheets as sheets
from pprint import pprint
import google_drive as drive
import batch_download as download
import rew_scraper as rew_scraper
import rew_scraper3 as rew3
def program_skeleton(dictionary: dict):
## Batch Merge creates a backup of contacts from csv in batches no greater than 500 contacts per document. Can be expanded. Keeps files from getting too large
if dictionary['tasks']['environmental_vars']['run'] == True:
dictionary['tasks']['environmental_vars']['log']['environmental_vars_set'] = lv.set_environmental_vars(dictionary['tasks'])
dictionary['tasks']['environmental_vars']['goog_creds'] = creds.get_creds()
dictionary['tasks']['environmental_vars']['sheets_service'] = sheets.get_sheet_service(dictionary['tasks']['environmental_vars']['goog_creds'])
dictionary['tasks']['environmental_vars']['drive_service'] = drive.get_drive_service(dictionary['tasks']['environmental_vars']['goog_creds'])
dictionary['tasks']['environmental_vars']['criteria_sheet_meta'] = sheets.confirm_sheet_ids(dictionary['tasks']['environmental_vars']['criteria_sheet_ids'],dictionary['tasks']['environmental_vars']['sheets_service'])
#dictionary['tasks']['environmental_vars']['output_sheet_meta'] = drive.add_spreadsheet_to_folder(dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'],dictionary['tasks']['environmental_vars']['date']['datetime'])
#dictionary['tasks']['environmental_vars']['dfs']['cities_search'] = goog_sheets.
#pprint(dictionary['tasks']['environmental_vars']['sheet_meta'])
lv.batchify(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'],dictionary['tasks']['environmental_vars']['batch_size'])
dictionary['tasks']['environmental_vars']['dnn'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['dnn'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
#sheets.batch_download(dictionary['tasks']['environmental_vars'])
#print(dictionary['tasks']['environmental_vars']['directories']['log_directory'])
#log.json_dump(dictionary['tasks'])
#log.csv_dump(dictionary['tasks'])
#print(dictionary)
if dictionary['tasks']['scrape_web_data_rew']['run'] == True:
#if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:
#pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])
#input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
#pprint(dictionary['tasks']['environmental_vars']['sheets_service'])
rew3.initial(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'])
#rew_scraper.scrape("agents/areas/toronto-on",dictionary['tasks']['environmental_vars']['sheets_service'],2,2)
#print('true')
if dictionary['tasks']['scrape_web_data_sheets']['run'] == True:
if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:
#pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])
#input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
scraper.scrape(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'])
#print('true')
#download.batch_download(dictionary['tasks']['environmental_vars'])
if dictionary['tasks']['confirm_folder_structure']['run'] == True:
dictionary['tasks']['confirm_folder_structure']['log']['folder_structure_confirmed'] = cfs.confirm_folder_structure(dictionary)
        #ff.fix_files(dictionary) # fix files if necessary; this corrects an earlier mistake on my end
if dictionary['tasks']['scrape_web_data']['run'] == True:
dictionary['tasks']['scrape_web_data']['log']['cities'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['cities'])
df = dictionary['tasks']['environmental_vars']['dfs']['cities'] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['cities'])
df_f.filter_state_data(df,'ct')
#dictionary['tasks']['environmental_vars']['dfs']['cities']['directory'] = df. apply dictionary['tasks']['environmental_vars']['sep'].join((dictionary['tasks']['environmental_vars']['directories']['to_merge'], dictionary['tasks']['environmental_vars']['dfs']['cities'].state_name,dictionary['tasks']['environmental_vars']['dfs']['cities'].city))
df['to_merge'] = dictionary['tasks']['environmental_vars']['directories']['to_merge']
df['directory'] = df[['to_merge','state_name', 'city']].apply(lambda x: dictionary['tasks']['environmental_vars']['sep'].join(x), axis=1)
#df['period'] = df[['Year', 'quarter']].apply(lambda x: ''.join(x), axis=1)
#print(dictionary['tasks']['environmental_vars']['dfs']['cities'].directory)
scraper.scrape(df)
#dictionary['tasks']['environmental_vars']['dfs'][''] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['zip_codes'])
#dictionary['tasks']['environmental_vars']['dfs']['zip_codes'] = rw.file_list(dictionary['tasks']['environmental_vars']['files']['zip_database'])
if dictionary['tasks']['merge_data']['run'] == True:
dictionary['tasks']['merge_data']['log']['files_to_merge'] = rw.file_list_walk(dictionary['tasks']['environmental_vars']['directories']['to_merge'])
dictionary['tasks']['environmental_vars']['dfs']['master_merge'] = m.merge_agent_data(dictionary['tasks']['merge_data']['log']['files_to_merge'])
#rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['master_merge'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
#print(dictionary['tasks']['environmental_vars']['dfs']['master_merge'])
if dictionary['tasks']['filter_data']['run'] == True:
print('filtering_data')
dictionary['tasks']['filter_data']['log']['files_to_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['merged_data'])
dictionary['tasks']['filter_data']['log']['dnn_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['dnn'])
df = dictionary['tasks']['environmental_vars']['dfs']['dnn'] = m.merge_csv(dictionary['tasks']['filter_data']['log']['dnn_filter'])
df["first_name"] = df["first_name"].str.lower()
df["last_name"] = df["last_name"].str.lower()
        ## checks to see if the df is already in memory; if not, it is created below
try:
if dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'].empty:
                #if the try succeeds and the df is empty, fill it anyway
dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])
else:
                #if it already exists, move on
print('The Df already exists')
pass
#do something
except:
#if exception is raised then the df does not exist. Create it
            print('The Df does not exist')
dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])
df_f.clean_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'])
df_f.filter_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],df,800000,3)
rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])
rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])
#if dictionary['tasks']['extract_agent_data']['run'] == True:
# dictionary['tasks']['environmental_vars']['dfs']['agent_data'] = m.merge_agent_data(dictionary['tasks'])
|
normal
|
{
"blob_id": "6a8007e44d2c4b56426cd49772cbc23df2eca49c",
"index": 6917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef program_skeleton(dictionary: dict):\n if dictionary['tasks']['environmental_vars']['run'] == True:\n dictionary['tasks']['environmental_vars']['log'][\n 'environmental_vars_set'] = lv.set_environmental_vars(dictionary\n ['tasks'])\n dictionary['tasks']['environmental_vars']['goog_creds'\n ] = creds.get_creds()\n dictionary['tasks']['environmental_vars']['sheets_service'\n ] = sheets.get_sheet_service(dictionary['tasks'][\n 'environmental_vars']['goog_creds'])\n dictionary['tasks']['environmental_vars']['drive_service'\n ] = drive.get_drive_service(dictionary['tasks'][\n 'environmental_vars']['goog_creds'])\n dictionary['tasks']['environmental_vars']['criteria_sheet_meta'\n ] = sheets.confirm_sheet_ids(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_ids'], dictionary['tasks'\n ]['environmental_vars']['sheets_service'])\n lv.batchify(dictionary['tasks']['environmental_vars'][\n 'criteria_sheet_meta'], dictionary['tasks'][\n 'environmental_vars']['batch_size'])\n dictionary['tasks']['environmental_vars']['dnn'\n ] = sheets.batch_download(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_meta']['dnn'], dictionary\n ['tasks']['environmental_vars']['sheets_service'], True)\n if dictionary['tasks']['scrape_web_data_rew']['run'] == True:\n dictionary['tasks']['environmental_vars']['input_list'\n ] = sheets.batch_download(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_meta']['input_list'],\n dictionary['tasks']['environmental_vars']['sheets_service'], True)\n rew3.initial(dictionary['tasks']['environmental_vars']['input_list'\n ], dictionary['tasks']['environmental_vars']['sheets_service'])\n if dictionary['tasks']['scrape_web_data_sheets']['run'] == True:\n if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'\n ] == True:\n dictionary['tasks']['environmental_vars']['input_list'\n ] = sheets.batch_download(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_meta']['input_list'],\n dictionary['tasks']['environmental_vars']['sheets_service'],\n True)\n scraper.scrape(dictionary['tasks']['environmental_vars'][\n 'input_list'], dictionary['tasks']['environmental_vars'][\n 'sheets_service'], dictionary['tasks']['environmental_vars'\n ]['drive_service'], dictionary['tasks'][\n 'environmental_vars']['output_folder_id'])\n if dictionary['tasks']['confirm_folder_structure']['run'] == True:\n dictionary['tasks']['confirm_folder_structure']['log'][\n 'folder_structure_confirmed'] = cfs.confirm_folder_structure(\n dictionary)\n if dictionary['tasks']['scrape_web_data']['run'] == True:\n dictionary['tasks']['scrape_web_data']['log']['cities'] = rw.file_list(\n dictionary['tasks']['environmental_vars']['directories']['cities'])\n df = dictionary['tasks']['environmental_vars']['dfs']['cities'\n ] = m.merge_zip_data(dictionary['tasks']['scrape_web_data'][\n 'log']['cities'])\n df_f.filter_state_data(df, 'ct')\n df['to_merge'] = dictionary['tasks']['environmental_vars'][\n 'directories']['to_merge']\n df['directory'] = df[['to_merge', 'state_name', 'city']].apply(lambda\n x: dictionary['tasks']['environmental_vars']['sep'].join(x), axis=1\n )\n scraper.scrape(df)\n if dictionary['tasks']['merge_data']['run'] == True:\n dictionary['tasks']['merge_data']['log']['files_to_merge'\n ] = rw.file_list_walk(dictionary['tasks']['environmental_vars']\n ['directories']['to_merge'])\n dictionary['tasks']['environmental_vars']['dfs']['master_merge'\n ] = m.merge_agent_data(dictionary['tasks']['merge_data']['log']\n 
['files_to_merge'])\n rw.df_toCsv(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_raw'],\n dictionary['tasks']['environmental_vars']['dfs']['master_merge'\n ], dictionary['tasks']['environmental_vars']['directories'][\n 'merged_data'])\n rw.df_toJson(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_raw'],\n dictionary['tasks']['environmental_vars']['dfs']['master_merge'\n ], dictionary['tasks']['environmental_vars']['directories'][\n 'merged_data'])\n if dictionary['tasks']['filter_data']['run'] == True:\n print('filtering_data')\n dictionary['tasks']['filter_data']['log']['files_to_filter'\n ] = rw.file_list(dictionary['tasks']['environmental_vars'][\n 'directories']['merged_data'])\n dictionary['tasks']['filter_data']['log']['dnn_filter'] = rw.file_list(\n dictionary['tasks']['environmental_vars']['directories']['dnn'])\n df = dictionary['tasks']['environmental_vars']['dfs']['dnn'\n ] = m.merge_csv(dictionary['tasks']['filter_data']['log'][\n 'dnn_filter'])\n df['first_name'] = df['first_name'].str.lower()\n df['last_name'] = df['last_name'].str.lower()\n try:\n if dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'].empty:\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'] = m.merge_json(dictionary['tasks']\n ['filter_data']['log']['files_to_filter'])\n else:\n print('The Df already exists')\n pass\n except:\n print('The Df no exists')\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'] = m.merge_json(dictionary['tasks'][\n 'filter_data']['log']['files_to_filter'])\n df_f.clean_realtor_data(dictionary['tasks']['environmental_vars'][\n 'dfs']['merged_agent_data'])\n df_f.filter_realtor_data(dictionary['tasks']['environmental_vars'][\n 'dfs']['merged_agent_data'], df, 800000, 3)\n rw.df_toCsv(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_mapped'],\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'], dictionary['tasks']['environmental_vars']\n ['directories']['mapped_data'])\n rw.df_toJson(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_mapped'],\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'], dictionary['tasks']['environmental_vars']\n ['directories']['mapped_data'])\n",
"step-3": "import write\nimport merge as m\nimport load_df as ldf\nimport load_vars as lv\nimport log as log\nimport clean_df as clean\nimport download as dl\nimport gc\nimport confirm_drcts as cfs\nimport fix_files as ff\nimport readwrite as rw\nimport df_filter as df_f\nimport realtor_scraper_sheets_3 as scraper\nimport get_creds as creds\nimport goog_sheets as sheets\nfrom pprint import pprint\nimport google_drive as drive\nimport batch_download as download\nimport rew_scraper as rew_scraper\nimport rew_scraper3 as rew3\n\n\ndef program_skeleton(dictionary: dict):\n if dictionary['tasks']['environmental_vars']['run'] == True:\n dictionary['tasks']['environmental_vars']['log'][\n 'environmental_vars_set'] = lv.set_environmental_vars(dictionary\n ['tasks'])\n dictionary['tasks']['environmental_vars']['goog_creds'\n ] = creds.get_creds()\n dictionary['tasks']['environmental_vars']['sheets_service'\n ] = sheets.get_sheet_service(dictionary['tasks'][\n 'environmental_vars']['goog_creds'])\n dictionary['tasks']['environmental_vars']['drive_service'\n ] = drive.get_drive_service(dictionary['tasks'][\n 'environmental_vars']['goog_creds'])\n dictionary['tasks']['environmental_vars']['criteria_sheet_meta'\n ] = sheets.confirm_sheet_ids(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_ids'], dictionary['tasks'\n ]['environmental_vars']['sheets_service'])\n lv.batchify(dictionary['tasks']['environmental_vars'][\n 'criteria_sheet_meta'], dictionary['tasks'][\n 'environmental_vars']['batch_size'])\n dictionary['tasks']['environmental_vars']['dnn'\n ] = sheets.batch_download(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_meta']['dnn'], dictionary\n ['tasks']['environmental_vars']['sheets_service'], True)\n if dictionary['tasks']['scrape_web_data_rew']['run'] == True:\n dictionary['tasks']['environmental_vars']['input_list'\n ] = sheets.batch_download(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_meta']['input_list'],\n dictionary['tasks']['environmental_vars']['sheets_service'], True)\n rew3.initial(dictionary['tasks']['environmental_vars']['input_list'\n ], dictionary['tasks']['environmental_vars']['sheets_service'])\n if dictionary['tasks']['scrape_web_data_sheets']['run'] == True:\n if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'\n ] == True:\n dictionary['tasks']['environmental_vars']['input_list'\n ] = sheets.batch_download(dictionary['tasks'][\n 'environmental_vars']['criteria_sheet_meta']['input_list'],\n dictionary['tasks']['environmental_vars']['sheets_service'],\n True)\n scraper.scrape(dictionary['tasks']['environmental_vars'][\n 'input_list'], dictionary['tasks']['environmental_vars'][\n 'sheets_service'], dictionary['tasks']['environmental_vars'\n ]['drive_service'], dictionary['tasks'][\n 'environmental_vars']['output_folder_id'])\n if dictionary['tasks']['confirm_folder_structure']['run'] == True:\n dictionary['tasks']['confirm_folder_structure']['log'][\n 'folder_structure_confirmed'] = cfs.confirm_folder_structure(\n dictionary)\n if dictionary['tasks']['scrape_web_data']['run'] == True:\n dictionary['tasks']['scrape_web_data']['log']['cities'] = rw.file_list(\n dictionary['tasks']['environmental_vars']['directories']['cities'])\n df = dictionary['tasks']['environmental_vars']['dfs']['cities'\n ] = m.merge_zip_data(dictionary['tasks']['scrape_web_data'][\n 'log']['cities'])\n df_f.filter_state_data(df, 'ct')\n df['to_merge'] = dictionary['tasks']['environmental_vars'][\n 'directories']['to_merge']\n df['directory'] = 
df[['to_merge', 'state_name', 'city']].apply(lambda\n x: dictionary['tasks']['environmental_vars']['sep'].join(x), axis=1\n )\n scraper.scrape(df)\n if dictionary['tasks']['merge_data']['run'] == True:\n dictionary['tasks']['merge_data']['log']['files_to_merge'\n ] = rw.file_list_walk(dictionary['tasks']['environmental_vars']\n ['directories']['to_merge'])\n dictionary['tasks']['environmental_vars']['dfs']['master_merge'\n ] = m.merge_agent_data(dictionary['tasks']['merge_data']['log']\n ['files_to_merge'])\n rw.df_toCsv(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_raw'],\n dictionary['tasks']['environmental_vars']['dfs']['master_merge'\n ], dictionary['tasks']['environmental_vars']['directories'][\n 'merged_data'])\n rw.df_toJson(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_raw'],\n dictionary['tasks']['environmental_vars']['dfs']['master_merge'\n ], dictionary['tasks']['environmental_vars']['directories'][\n 'merged_data'])\n if dictionary['tasks']['filter_data']['run'] == True:\n print('filtering_data')\n dictionary['tasks']['filter_data']['log']['files_to_filter'\n ] = rw.file_list(dictionary['tasks']['environmental_vars'][\n 'directories']['merged_data'])\n dictionary['tasks']['filter_data']['log']['dnn_filter'] = rw.file_list(\n dictionary['tasks']['environmental_vars']['directories']['dnn'])\n df = dictionary['tasks']['environmental_vars']['dfs']['dnn'\n ] = m.merge_csv(dictionary['tasks']['filter_data']['log'][\n 'dnn_filter'])\n df['first_name'] = df['first_name'].str.lower()\n df['last_name'] = df['last_name'].str.lower()\n try:\n if dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'].empty:\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'] = m.merge_json(dictionary['tasks']\n ['filter_data']['log']['files_to_filter'])\n else:\n print('The Df already exists')\n pass\n except:\n print('The Df no exists')\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'] = m.merge_json(dictionary['tasks'][\n 'filter_data']['log']['files_to_filter'])\n df_f.clean_realtor_data(dictionary['tasks']['environmental_vars'][\n 'dfs']['merged_agent_data'])\n df_f.filter_realtor_data(dictionary['tasks']['environmental_vars'][\n 'dfs']['merged_agent_data'], df, 800000, 3)\n rw.df_toCsv(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_mapped'],\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'], dictionary['tasks']['environmental_vars']\n ['directories']['mapped_data'])\n rw.df_toJson(dictionary['tasks'], dictionary['tasks'][\n 'environmental_vars']['file_names']['agent_data_mapped'],\n dictionary['tasks']['environmental_vars']['dfs'][\n 'merged_agent_data'], dictionary['tasks']['environmental_vars']\n ['directories']['mapped_data'])\n",
"step-4": "#program_skeleton.py\n#import load_json_files as bm\n\nimport write\nimport merge as m\nimport load_df as ldf\nimport load_vars as lv\nimport log as log\nimport clean_df as clean\nimport download as dl\nimport gc\nimport confirm_drcts as cfs\nimport fix_files as ff\nimport readwrite as rw\nimport df_filter as df_f\nimport realtor_scraper_sheets_3 as scraper\nimport get_creds as creds\nimport goog_sheets as sheets\nfrom pprint import pprint\nimport google_drive as drive\nimport batch_download as download\nimport rew_scraper as rew_scraper\nimport rew_scraper3 as rew3\n\n\ndef program_skeleton(dictionary: dict):\n\n## Batch Merge creates a back_up of contacts from csv in batches no greater than 500 contacts per document. Can be expanded. Keeps files from getting to large\n \n \n if dictionary['tasks']['environmental_vars']['run'] == True:\n dictionary['tasks']['environmental_vars']['log']['environmental_vars_set'] = lv.set_environmental_vars(dictionary['tasks'])\n dictionary['tasks']['environmental_vars']['goog_creds'] = creds.get_creds()\n dictionary['tasks']['environmental_vars']['sheets_service'] = sheets.get_sheet_service(dictionary['tasks']['environmental_vars']['goog_creds'])\n dictionary['tasks']['environmental_vars']['drive_service'] = drive.get_drive_service(dictionary['tasks']['environmental_vars']['goog_creds'])\n dictionary['tasks']['environmental_vars']['criteria_sheet_meta'] = sheets.confirm_sheet_ids(dictionary['tasks']['environmental_vars']['criteria_sheet_ids'],dictionary['tasks']['environmental_vars']['sheets_service'])\n #dictionary['tasks']['environmental_vars']['output_sheet_meta'] = drive.add_spreadsheet_to_folder(dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'],dictionary['tasks']['environmental_vars']['date']['datetime'])\n \n #dictionary['tasks']['environmental_vars']['dfs']['cities_search'] = goog_sheets.\n #pprint(dictionary['tasks']['environmental_vars']['sheet_meta'])\n lv.batchify(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'],dictionary['tasks']['environmental_vars']['batch_size'])\n dictionary['tasks']['environmental_vars']['dnn'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['dnn'],dictionary['tasks']['environmental_vars']['sheets_service'],True)\n \n #sheets.batch_download(dictionary['tasks']['environmental_vars'])\n #print(dictionary['tasks']['environmental_vars']['directories']['log_directory'])\n #log.json_dump(dictionary['tasks'])\n #log.csv_dump(dictionary['tasks'])\n #print(dictionary)\n if dictionary['tasks']['scrape_web_data_rew']['run'] == True:\n #if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:\n #pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])\n #input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)\n dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)\n #pprint(dictionary['tasks']['environmental_vars']['sheets_service'])\n rew3.initial(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'])\n 
#rew_scraper.scrape(\"agents/areas/toronto-on\",dictionary['tasks']['environmental_vars']['sheets_service'],2,2)\n #print('true')\n\n if dictionary['tasks']['scrape_web_data_sheets']['run'] == True:\n if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:\n #pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])\n #input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)\n dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)\n scraper.scrape(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'])\n #print('true')\n \n\n\n #download.batch_download(dictionary['tasks']['environmental_vars'])\n \n\n\n\n if dictionary['tasks']['confirm_folder_structure']['run'] == True:\n dictionary['tasks']['confirm_folder_structure']['log']['folder_structure_confirmed'] = cfs.confirm_folder_structure(dictionary)\n #ff.fix_files(dictionary) # fix files if necessary. This is a fuck up on my end...\n \n if dictionary['tasks']['scrape_web_data']['run'] == True:\n dictionary['tasks']['scrape_web_data']['log']['cities'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['cities'])\n df = dictionary['tasks']['environmental_vars']['dfs']['cities'] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['cities'])\n df_f.filter_state_data(df,'ct')\n #dictionary['tasks']['environmental_vars']['dfs']['cities']['directory'] = df. 
apply dictionary['tasks']['environmental_vars']['sep'].join((dictionary['tasks']['environmental_vars']['directories']['to_merge'], dictionary['tasks']['environmental_vars']['dfs']['cities'].state_name,dictionary['tasks']['environmental_vars']['dfs']['cities'].city))\n df['to_merge'] = dictionary['tasks']['environmental_vars']['directories']['to_merge']\n df['directory'] = df[['to_merge','state_name', 'city']].apply(lambda x: dictionary['tasks']['environmental_vars']['sep'].join(x), axis=1)\n #df['period'] = df[['Year', 'quarter']].apply(lambda x: ''.join(x), axis=1)\n #print(dictionary['tasks']['environmental_vars']['dfs']['cities'].directory)\n scraper.scrape(df)\n\n\n #dictionary['tasks']['environmental_vars']['dfs'][''] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['zip_codes'])\n #dictionary['tasks']['environmental_vars']['dfs']['zip_codes'] = rw.file_list(dictionary['tasks']['environmental_vars']['files']['zip_database'])\n\n if dictionary['tasks']['merge_data']['run'] == True:\n dictionary['tasks']['merge_data']['log']['files_to_merge'] = rw.file_list_walk(dictionary['tasks']['environmental_vars']['directories']['to_merge'])\n dictionary['tasks']['environmental_vars']['dfs']['master_merge'] = m.merge_agent_data(dictionary['tasks']['merge_data']['log']['files_to_merge'])\n #rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['master_merge'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])\n rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])\n rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])\n #print(dictionary['tasks']['environmental_vars']['dfs']['master_merge'])\n\n if dictionary['tasks']['filter_data']['run'] == True:\n print('filtering_data')\n dictionary['tasks']['filter_data']['log']['files_to_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['merged_data'])\n dictionary['tasks']['filter_data']['log']['dnn_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['dnn'])\n \n \n df = dictionary['tasks']['environmental_vars']['dfs']['dnn'] = m.merge_csv(dictionary['tasks']['filter_data']['log']['dnn_filter'])\n \n df[\"first_name\"] = df[\"first_name\"].str.lower()\n df[\"last_name\"] = df[\"last_name\"].str.lower()\n ## checks to see if the df is already in memory. If not the pass \n try:\n if dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'].empty:\n #if try succeeds and if is true then fill it anyways\n dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])\n \n else:\n #if alrady exists move on\n print('The Df already exists')\n pass\n #do something\n except:\n #if exception is raised then the df does not exist. 
Create it\n print('The Df no exists')\n dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])\n\n \n \n df_f.clean_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'])\n df_f.filter_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],df,800000,3)\n\n rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])\n rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])\n\n\n \n \n \n\n\n\n\n\n\n #if dictionary['tasks']['extract_agent_data']['run'] == True:\n # dictionary['tasks']['environmental_vars']['dfs']['agent_data'] = m.merge_agent_data(dictionary['tasks'])\n\n\n \n\n \n\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
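The try/except "does the df already exist" check in the record can be written without exception handling. A sketch assuming the same nested dictionary layout and the record's m.merge_json helper:

dfs = dictionary['tasks']['environmental_vars'].setdefault('dfs', {})
if 'merged_agent_data' not in dfs or dfs['merged_agent_data'].empty:
    dfs['merged_agent_data'] = m.merge_json(
        dictionary['tasks']['filter_data']['log']['files_to_filter'])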
import os
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from aec.apps.vocabulary.serializers import DictionarySerializer
from aec.apps.vocabulary.models import Word
from aec.apps.library.serializers import LibrarySerializer
from aec.apps.library.models import Library
class Command(BaseCommand):
args = ''
help = 'load vocabulary from csv_file'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.input_options = None
def add_arguments(self, parser):
parser.add_argument(
'-p', '--print',
default=False,
action='store_true',
dest='print',
help='Print info.'
)
parser.add_argument(
'-f', '--file',
dest='file',
help='File for load to db.'
)
parser.add_argument(
'--level',
dest='level',
help='Level for data.'
)
parser.add_argument(
'--lesson',
dest='lesson',
help='Lesson for data.'
)
def print_info(self, template='', context=None):
if self.input_options['print']:
context = context or {}
            print(str(template).format(**context))
def handle(self, *args, **options):
self.input_options = options
if not options['level']:
raise CommandError("Option `--level=...` must be specified.")
if not options['lesson']:
raise CommandError("Option `--lesson=...` must be specified.")
if not options['file']:
raise CommandError("Option `--file=...` must be specified.")
file_path = os.path.join(settings.BASE_DIR,
'data/{f}'.format(f=options['file']))
if not os.path.isfile(file_path):
raise CommandError("File does not exist at the specified path.")
try:
library = Library.objects.get(level=options['level'],
lesson=options['lesson'])
except ObjectDoesNotExist:
library_serializer = LibrarySerializer(data=options)
if library_serializer.is_valid():
library_serializer.save()
library = Library.objects.get(pk=library_serializer.data['id'])
else:
raise CommandError(library_serializer.errors)
with open(file_path) as dict_file:
csv_data = csv.DictReader(dict_file)
for row in csv_data:
row['english'] = row['english'].lower()
self.print_info('***\n{english}', row)
try:
vocabulary = Word.objects.get(english=row['english'])
self.print_info('{english} - lexicon already exist', row)
vocabulary.library.add(library)
vocabulary.save()
except ObjectDoesNotExist:
row['translate'] = row['translate'].decode('utf-8')
row['library'] = [library.id, ]
vocabulary_serializer = DictionarySerializer(data=row)
if vocabulary_serializer.is_valid():
vocabulary_serializer.save()
else:
self.print_info('error - {error}', dict(
word=row['english'],
error=vocabulary_serializer.errors))
|
normal
|
{
"blob_id": "7d4d5ca14c3e1479059f77c6a7f8dcfad599443b",
"index": 4729,
"step-1": "import os\nimport csv\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom aec.apps.vocabulary.serializers import DictionarySerializer\nfrom aec.apps.vocabulary.models import Word\nfrom aec.apps.library.serializers import LibrarySerializer\nfrom aec.apps.library.models import Library\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'load vocabulary from csv_file'\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.input_options = None\n\n def add_arguments(self, parser):\n parser.add_argument(\n '-p', '--print',\n default=False,\n action='store_true',\n dest='print',\n help='Print info.'\n )\n parser.add_argument(\n '-f', '--file',\n dest='file',\n help='File for load to db.'\n )\n parser.add_argument(\n '--level',\n dest='level',\n help='Level for data.'\n )\n parser.add_argument(\n '--lesson',\n dest='lesson',\n help='Lesson for data.'\n )\n\n def print_info(self, template='', context=None):\n if self.input_options['print']:\n context = context or {}\n print str(template).format(**context)\n\n def handle(self, *args, **options):\n\n self.input_options = options\n\n if not options['level']:\n raise CommandError(\"Option `--level=...` must be specified.\")\n\n if not options['lesson']:\n raise CommandError(\"Option `--lesson=...` must be specified.\")\n\n if not options['file']:\n raise CommandError(\"Option `--file=...` must be specified.\")\n\n file_path = os.path.join(settings.BASE_DIR,\n 'data/{f}'.format(f=options['file']))\n\n if not os.path.isfile(file_path):\n raise CommandError(\"File does not exist at the specified path.\")\n\n try:\n library = Library.objects.get(level=options['level'],\n lesson=options['lesson'])\n except ObjectDoesNotExist:\n library_serializer = LibrarySerializer(data=options)\n if library_serializer.is_valid():\n library_serializer.save()\n library = Library.objects.get(pk=library_serializer.data['id'])\n else:\n raise CommandError(library_serializer.errors)\n\n with open(file_path) as dict_file:\n csv_data = csv.DictReader(dict_file)\n for row in csv_data:\n row['english'] = row['english'].lower()\n self.print_info('***\\n{english}', row)\n try:\n vocabulary = Word.objects.get(english=row['english'])\n self.print_info('{english} - lexicon already exist', row)\n vocabulary.library.add(library)\n vocabulary.save()\n except ObjectDoesNotExist:\n row['translate'] = row['translate'].decode('utf-8')\n row['library'] = [library.id, ]\n vocabulary_serializer = DictionarySerializer(data=row)\n if vocabulary_serializer.is_valid():\n vocabulary_serializer.save()\n else:\n self.print_info('error - {error}', dict(\n word=row['english'],\n error=vocabulary_serializer.errors))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
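Django names a custom command after its module. Assuming this one lives at management/commands/load_vocabulary.py (the actual filename is not shown, and the option values below are illustrative), it could be driven from code or tests like this:

from django.core.management import call_command

# keyword names follow the dest= values declared in add_arguments
call_command('load_vocabulary', file='words.csv', level='A1', lesson='3', print=True)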
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('proposal', '0016_project_callobjectives')]
operations = [migrations.AlterModelOptions(name='setting', options={
'ordering': ['group', 'name']}), migrations.AddField(model_name=
'setting', name='description', field=models.TextField(blank=True,
help_text='Explain what this setting does, where it is used.',
verbose_name='Description of this setting'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('proposal', '0016_project_callobjectives')]
operations = [migrations.AlterModelOptions(name='setting', options={
'ordering': ['group', 'name']}), migrations.AddField(model_name=
'setting', name='description', field=models.TextField(blank=True,
help_text='Explain what this setting does, where it is used.',
verbose_name='Description of this setting'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-09 14:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposal', '0016_project_callobjectives'),
]
operations = [
migrations.AlterModelOptions(
name='setting',
options={'ordering': ['group', 'name']},
),
migrations.AddField(
model_name='setting',
name='description',
field=models.TextField(blank=True, help_text='Explain what this setting does, where it is used.', verbose_name='Description of this setting'),
),
]
|
flexible
|
{
"blob_id": "d5c7b8966e73c607d1d1c5da9814ef507dc53b59",
"index": 6852,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('proposal', '0016_project_callobjectives')]\n operations = [migrations.AlterModelOptions(name='setting', options={\n 'ordering': ['group', 'name']}), migrations.AddField(model_name=\n 'setting', name='description', field=models.TextField(blank=True,\n help_text='Explain what this setting does, where it is used.',\n verbose_name='Description of this setting'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('proposal', '0016_project_callobjectives')]\n operations = [migrations.AlterModelOptions(name='setting', options={\n 'ordering': ['group', 'name']}), migrations.AddField(model_name=\n 'setting', name='description', field=models.TextField(blank=True,\n help_text='Explain what this setting does, where it is used.',\n verbose_name='Description of this setting'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.3 on 2017-03-09 14:28\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('proposal', '0016_project_callobjectives'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='setting',\n options={'ordering': ['group', 'name']},\n ),\n migrations.AddField(\n model_name='setting',\n name='description',\n field=models.TextField(blank=True, help_text='Explain what this setting does, where it is used.', verbose_name='Description of this setting'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
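For reference, the AddField and AlterModelOptions operations above correspond to a model roughly like the following; the group and name fields are hypothetical, implied only by the ordering option:

from django.db import models

class Setting(models.Model):
    group = models.CharField(max_length=100)  # hypothetical field, implied by ordering
    name = models.CharField(max_length=100)   # hypothetical field, implied by ordering
    description = models.TextField(
        blank=True,
        help_text='Explain what this setting does, where it is used.',
        verbose_name='Description of this setting',
    )

    class Meta:
        ordering = ['group', 'name']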
<|reserved_special_token_0|>
class InvoicePositionViewSet(ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = IsAuthenticated,
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ContactViewSet(ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AddressViewSet(ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
permission_classes = IsAuthenticated,
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = IsAuthenticated,
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
<|reserved_special_token_1|>
from rest_framework import filters
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ModelViewSet
from apis.models import Contact, Address, InvoicePosition, Country, Invoice
from apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, InvoiceSerializer
class ContactViewSet(ModelViewSet):
queryset = Contact.objects.all()
serializer_class = ContactSerializer
filterset_fields = ['type']
permission_classes = IsAuthenticated,
class AddressViewSet(ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
permission_classes = IsAuthenticated,
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = IsAuthenticated,
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = IsAuthenticated,
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = IsAuthenticated,
<|reserved_special_token_1|>
from rest_framework import filters
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ModelViewSet
from apis.models import Contact, Address, InvoicePosition, Country, Invoice
from apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, \
InvoiceSerializer
class ContactViewSet(ModelViewSet):
queryset = Contact.objects.all()
serializer_class = ContactSerializer
filterset_fields = ['type']
permission_classes = (IsAuthenticated,)
class AddressViewSet(ModelViewSet):
queryset = Address.objects.all()
serializer_class = AddressSerializer
permission_classes = (IsAuthenticated,)
class InvoicePositionViewSet(ModelViewSet):
queryset = InvoicePosition.objects.all()
serializer_class = InvoicePositionSerializer
permission_classes = (IsAuthenticated,)
class CountryListView(ListAPIView):
queryset = Country.objects.all()
serializer_class = CountrySerializer
filter_backends = [filters.SearchFilter]
search_fields = ['value']
permission_classes = (IsAuthenticated,)
class InvoiceViewSet(ModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['address__contact__name']
permission_classes = (IsAuthenticated,)
|
flexible
|
{
"blob_id": "43bad38d209b5c326cb9f17ba1ae135d06320e97",
"index": 145,
"step-1": "<mask token>\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-2": "<mask token>\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = IsAuthenticated,\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-3": "<mask token>\n\n\nclass ContactViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = IsAuthenticated,\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = IsAuthenticated,\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-4": "from rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.viewsets import ModelViewSet\nfrom apis.models import Contact, Address, InvoicePosition, Country, Invoice\nfrom apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, InvoiceSerializer\n\n\nclass ContactViewSet(ModelViewSet):\n queryset = Contact.objects.all()\n serializer_class = ContactSerializer\n filterset_fields = ['type']\n permission_classes = IsAuthenticated,\n\n\nclass AddressViewSet(ModelViewSet):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = IsAuthenticated,\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = IsAuthenticated,\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-5": "from rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom apis.models import Contact, Address, InvoicePosition, Country, Invoice\nfrom apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, \\\n InvoiceSerializer\n\n\nclass ContactViewSet(ModelViewSet):\n queryset = Contact.objects.all()\n serializer_class = ContactSerializer\n filterset_fields = ['type']\n permission_classes = (IsAuthenticated,)\n\n\nclass AddressViewSet(ModelViewSet):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = (IsAuthenticated,)\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = (IsAuthenticated,)\n",
"step-ids": [
5,
6,
9,
11,
12
]
}
|
[
5,
6,
9,
11,
12
] |
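A sketch of exercising the SearchFilter wired into InvoiceViewSet from a client; the route and token are placeholders, and IsAuthenticated means credentials must be supplied:

import requests

resp = requests.get(
    'https://api.example.com/invoices/',             # placeholder route
    params={'search': 'Smith'},                      # matched against address__contact__name
    headers={'Authorization': 'Token <your-token>'}, # required by IsAuthenticated
)
invoices = resp.json()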
from torchsummary import summary
import torch
import torch.nn as nn
import torch.nn.functional as F
from eva4modeltrainer import ModelTrainer
class Net(nn.Module):
"""
Base network that defines helper functions, summary and mapping to device
"""
def conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, groups=1, padding=1, bias=False, padding_mode="zeros"):
return [nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, groups=groups, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode)]
def separable_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, padding=1, bias=False, padding_mode="zeros"):
return [nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, groups=in_channels, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1,1), bias=bias)]
def activate(self, l, out_channels, bn=True, dropout=0, relu=True,max_pooling=0):
if(max_pooling>0):
l.append(nn.MaxPool2d(2,2))
if bn:
l.append(nn.BatchNorm2d(out_channels))
if dropout>0:
l.append(nn.Dropout(dropout))
if relu:
l.append(nn.ReLU())
return nn.Sequential(*l)
def create_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, groups=1, padding=1, bias=False, bn=True, dropout=0, relu=True, padding_mode="zeros",max_pooling=0):
return self.activate(self.conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, groups=groups, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode), out_channels, bn, dropout, relu,max_pooling)
def create_depthwise_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, padding=1, bias=False, bn=True, dropout=0, relu=True, padding_mode="zeros"):
return self.activate(self.separable_conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode),
out_channels, bn, dropout, relu)
def __init__(self, name="Model"):
super(Net, self).__init__()
self.trainer = None
self.name = name
def summary(self, input_size): #input_size=(1, 28, 28)
summary(self, input_size=input_size)
def gotrain(self, optimizer, train_loader, test_loader, epochs, statspath, scheduler=None, batch_scheduler=False, L1lambda=0):
self.trainer = ModelTrainer(self, optimizer, train_loader, test_loader, statspath, scheduler, batch_scheduler, L1lambda)
self.trainer.run(epochs)
def stats(self):
return self.trainer.stats if self.trainer else None
#implementation of the new resnet model
class newResnetS11(Net):
def __init__(self,name="Model",dropout_value=0):
super(newResnetS11,self).__init__(name)
self.prepLayer=self.create_conv2d(3, 64, dropout=dropout_value)
#layer1
self.layer1Conv1=self.create_conv2d(64,128, dropout=dropout_value,max_pooling=1)
self.layer1resnetBlock1=self.resnetBlock(128,128)
#layer2
self.layer2Conv1=self.create_conv2d(128,256, dropout=dropout_value,max_pooling=1)
#layer3
self.layer3Conv1=self.create_conv2d(256,512, dropout=dropout_value,max_pooling=1)
self.layer3resnetBlock1=self.resnetBlock(512,512)
#ending layer or layer-4
self.maxpool=nn.MaxPool2d(4,1)
self.fc_layer=self.create_conv2d(512, 10, kernel_size=(1,1), padding=0, bn=False, relu=False)
def resnetBlock(self,in_channels, out_channels):
l=[]
l.append(nn.Conv2d(in_channels,out_channels,(3,3),padding=1,bias=False))
l.append(nn.BatchNorm2d(out_channels))
l.append(nn.ReLU())
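        # note: the second conv below also takes in_channels as its input
        # size, so this block assumes in_channels == out_channels (true for
        # both call sites in __init__ above)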
l.append(nn.Conv2d(in_channels,out_channels,(3,3),padding=1,bias=False))
l.append(nn.BatchNorm2d(out_channels))
l.append(nn.ReLU())
return nn.Sequential(*l)
def forward(self,x):
#prepLayer
x=self.prepLayer(x)
#Layer1
x=self.layer1Conv1(x)
r1=self.layer1resnetBlock1(x)
x=torch.add(x,r1)
#layer2
x=self.layer2Conv1(x)
#layer3
x=self.layer3Conv1(x)
r2=self.layer3resnetBlock1(x)
x=torch.add(x,r2)
#layer4 or ending layer
x=self.maxpool(x)
x=self.fc_layer(x)
x=x.view(-1,10)
return F.log_softmax(x,dim=-1)
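# A minimal smoke test (not part of the original source). It assumes
# CIFAR-10-sized inputs, 3x32x32: the three max-pooled layers take the map
# from 32 -> 16 -> 8 -> 4, leaving a 4x4 grid for nn.MaxPool2d(4, 1) above.
if __name__ == '__main__':
    model = newResnetS11(name='S11', dropout_value=0.05)
    dummy = torch.randn(2, 3, 32, 32)  # batch of two RGB 32x32 images
    out = model(dummy)
    print(out.shape)  # expected: torch.Size([2, 10])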
|
normal
|
{
"blob_id": "f925b3b2f55c3f8daf57438d8d20b60446ae39af",
"index": 6111,
"step-1": "<mask token>\n\n\nclass Net(nn.Module):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_depthwise_conv2d(self, in_channels, out_channels,\n kernel_size=(3, 3), dilation=1, padding=1, bias=False, bn=True,\n dropout=0, relu=True, padding_mode='zeros'):\n return self.activate(self.separable_conv2d(in_channels=in_channels,\n out_channels=out_channels, kernel_size=kernel_size, dilation=\n dilation, padding=padding, bias=bias, padding_mode=padding_mode\n ), out_channels, bn, dropout, relu)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass newResnetS11(Net):\n\n def __init__(self, name='Model', dropout_value=0):\n super(newResnetS11, self).__init__(name)\n self.prepLayer = self.create_conv2d(3, 64, dropout=dropout_value)\n self.layer1Conv1 = self.create_conv2d(64, 128, dropout=\n dropout_value, max_pooling=1)\n self.layer1resnetBlock1 = self.resnetBlock(128, 128)\n self.layer2Conv1 = self.create_conv2d(128, 256, dropout=\n dropout_value, max_pooling=1)\n self.layer3Conv1 = self.create_conv2d(256, 512, dropout=\n dropout_value, max_pooling=1)\n self.layer3resnetBlock1 = self.resnetBlock(512, 512)\n self.maxpool = nn.MaxPool2d(4, 1)\n self.fc_layer = self.create_conv2d(512, 10, kernel_size=(1, 1),\n padding=0, bn=False, relu=False)\n\n def resnetBlock(self, in_channels, out_channels):\n l = []\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n return nn.Sequential(*l)\n\n def forward(self, x):\n x = self.prepLayer(x)\n x = self.layer1Conv1(x)\n r1 = self.layer1resnetBlock1(x)\n x = torch.add(x, r1)\n x = self.layer2Conv1(x)\n x = self.layer3Conv1(x)\n r2 = self.layer3resnetBlock1(x)\n x = torch.add(x, r2)\n x = self.maxpool(x)\n x = self.fc_layer(x)\n x = x.view(-1, 10)\n return F.log_softmax(x, dim=-1)\n",
"step-2": "<mask token>\n\n\nclass Net(nn.Module):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_depthwise_conv2d(self, in_channels, out_channels,\n kernel_size=(3, 3), dilation=1, padding=1, bias=False, bn=True,\n dropout=0, relu=True, padding_mode='zeros'):\n return self.activate(self.separable_conv2d(in_channels=in_channels,\n out_channels=out_channels, kernel_size=kernel_size, dilation=\n dilation, padding=padding, bias=bias, padding_mode=padding_mode\n ), out_channels, bn, dropout, relu)\n <mask token>\n <mask token>\n\n def gotrain(self, optimizer, train_loader, test_loader, epochs,\n statspath, scheduler=None, batch_scheduler=False, L1lambda=0):\n self.trainer = ModelTrainer(self, optimizer, train_loader,\n test_loader, statspath, scheduler, batch_scheduler, L1lambda)\n self.trainer.run(epochs)\n <mask token>\n\n\nclass newResnetS11(Net):\n\n def __init__(self, name='Model', dropout_value=0):\n super(newResnetS11, self).__init__(name)\n self.prepLayer = self.create_conv2d(3, 64, dropout=dropout_value)\n self.layer1Conv1 = self.create_conv2d(64, 128, dropout=\n dropout_value, max_pooling=1)\n self.layer1resnetBlock1 = self.resnetBlock(128, 128)\n self.layer2Conv1 = self.create_conv2d(128, 256, dropout=\n dropout_value, max_pooling=1)\n self.layer3Conv1 = self.create_conv2d(256, 512, dropout=\n dropout_value, max_pooling=1)\n self.layer3resnetBlock1 = self.resnetBlock(512, 512)\n self.maxpool = nn.MaxPool2d(4, 1)\n self.fc_layer = self.create_conv2d(512, 10, kernel_size=(1, 1),\n padding=0, bn=False, relu=False)\n\n def resnetBlock(self, in_channels, out_channels):\n l = []\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n return nn.Sequential(*l)\n\n def forward(self, x):\n x = self.prepLayer(x)\n x = self.layer1Conv1(x)\n r1 = self.layer1resnetBlock1(x)\n x = torch.add(x, r1)\n x = self.layer2Conv1(x)\n x = self.layer3Conv1(x)\n r2 = self.layer3resnetBlock1(x)\n x = torch.add(x, r2)\n x = self.maxpool(x)\n x = self.fc_layer(x)\n x = x.view(-1, 10)\n return F.log_softmax(x, dim=-1)\n",
"step-3": "<mask token>\n\n\nclass Net(nn.Module):\n <mask token>\n\n def conv2d(self, in_channels, out_channels, kernel_size=(3, 3),\n dilation=1, groups=1, padding=1, bias=False, padding_mode='zeros'):\n return [nn.Conv2d(in_channels=in_channels, out_channels=\n out_channels, kernel_size=kernel_size, groups=groups, dilation=\n dilation, padding=padding, bias=bias, padding_mode=padding_mode)]\n <mask token>\n\n def activate(self, l, out_channels, bn=True, dropout=0, relu=True,\n max_pooling=0):\n if max_pooling > 0:\n l.append(nn.MaxPool2d(2, 2))\n if bn:\n l.append(nn.BatchNorm2d(out_channels))\n if dropout > 0:\n l.append(nn.Dropout(dropout))\n if relu:\n l.append(nn.ReLU())\n return nn.Sequential(*l)\n <mask token>\n\n def create_depthwise_conv2d(self, in_channels, out_channels,\n kernel_size=(3, 3), dilation=1, padding=1, bias=False, bn=True,\n dropout=0, relu=True, padding_mode='zeros'):\n return self.activate(self.separable_conv2d(in_channels=in_channels,\n out_channels=out_channels, kernel_size=kernel_size, dilation=\n dilation, padding=padding, bias=bias, padding_mode=padding_mode\n ), out_channels, bn, dropout, relu)\n\n def __init__(self, name='Model'):\n super(Net, self).__init__()\n self.trainer = None\n self.name = name\n\n def summary(self, input_size):\n summary(self, input_size=input_size)\n\n def gotrain(self, optimizer, train_loader, test_loader, epochs,\n statspath, scheduler=None, batch_scheduler=False, L1lambda=0):\n self.trainer = ModelTrainer(self, optimizer, train_loader,\n test_loader, statspath, scheduler, batch_scheduler, L1lambda)\n self.trainer.run(epochs)\n <mask token>\n\n\nclass newResnetS11(Net):\n\n def __init__(self, name='Model', dropout_value=0):\n super(newResnetS11, self).__init__(name)\n self.prepLayer = self.create_conv2d(3, 64, dropout=dropout_value)\n self.layer1Conv1 = self.create_conv2d(64, 128, dropout=\n dropout_value, max_pooling=1)\n self.layer1resnetBlock1 = self.resnetBlock(128, 128)\n self.layer2Conv1 = self.create_conv2d(128, 256, dropout=\n dropout_value, max_pooling=1)\n self.layer3Conv1 = self.create_conv2d(256, 512, dropout=\n dropout_value, max_pooling=1)\n self.layer3resnetBlock1 = self.resnetBlock(512, 512)\n self.maxpool = nn.MaxPool2d(4, 1)\n self.fc_layer = self.create_conv2d(512, 10, kernel_size=(1, 1),\n padding=0, bn=False, relu=False)\n\n def resnetBlock(self, in_channels, out_channels):\n l = []\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n return nn.Sequential(*l)\n\n def forward(self, x):\n x = self.prepLayer(x)\n x = self.layer1Conv1(x)\n r1 = self.layer1resnetBlock1(x)\n x = torch.add(x, r1)\n x = self.layer2Conv1(x)\n x = self.layer3Conv1(x)\n r2 = self.layer3resnetBlock1(x)\n x = torch.add(x, r2)\n x = self.maxpool(x)\n x = self.fc_layer(x)\n x = x.view(-1, 10)\n return F.log_softmax(x, dim=-1)\n",
"step-4": "<mask token>\n\n\nclass Net(nn.Module):\n <mask token>\n\n def conv2d(self, in_channels, out_channels, kernel_size=(3, 3),\n dilation=1, groups=1, padding=1, bias=False, padding_mode='zeros'):\n return [nn.Conv2d(in_channels=in_channels, out_channels=\n out_channels, kernel_size=kernel_size, groups=groups, dilation=\n dilation, padding=padding, bias=bias, padding_mode=padding_mode)]\n\n def separable_conv2d(self, in_channels, out_channels, kernel_size=(3, 3\n ), dilation=1, padding=1, bias=False, padding_mode='zeros'):\n return [nn.Conv2d(in_channels=in_channels, out_channels=in_channels,\n kernel_size=kernel_size, groups=in_channels, dilation=dilation,\n padding=padding, bias=bias, padding_mode=padding_mode), nn.\n Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=(1, 1), bias=bias)]\n\n def activate(self, l, out_channels, bn=True, dropout=0, relu=True,\n max_pooling=0):\n if max_pooling > 0:\n l.append(nn.MaxPool2d(2, 2))\n if bn:\n l.append(nn.BatchNorm2d(out_channels))\n if dropout > 0:\n l.append(nn.Dropout(dropout))\n if relu:\n l.append(nn.ReLU())\n return nn.Sequential(*l)\n\n def create_conv2d(self, in_channels, out_channels, kernel_size=(3, 3),\n dilation=1, groups=1, padding=1, bias=False, bn=True, dropout=0,\n relu=True, padding_mode='zeros', max_pooling=0):\n return self.activate(self.conv2d(in_channels=in_channels,\n out_channels=out_channels, kernel_size=kernel_size, groups=\n groups, dilation=dilation, padding=padding, bias=bias,\n padding_mode=padding_mode), out_channels, bn, dropout, relu,\n max_pooling)\n\n def create_depthwise_conv2d(self, in_channels, out_channels,\n kernel_size=(3, 3), dilation=1, padding=1, bias=False, bn=True,\n dropout=0, relu=True, padding_mode='zeros'):\n return self.activate(self.separable_conv2d(in_channels=in_channels,\n out_channels=out_channels, kernel_size=kernel_size, dilation=\n dilation, padding=padding, bias=bias, padding_mode=padding_mode\n ), out_channels, bn, dropout, relu)\n\n def __init__(self, name='Model'):\n super(Net, self).__init__()\n self.trainer = None\n self.name = name\n\n def summary(self, input_size):\n summary(self, input_size=input_size)\n\n def gotrain(self, optimizer, train_loader, test_loader, epochs,\n statspath, scheduler=None, batch_scheduler=False, L1lambda=0):\n self.trainer = ModelTrainer(self, optimizer, train_loader,\n test_loader, statspath, scheduler, batch_scheduler, L1lambda)\n self.trainer.run(epochs)\n\n def stats(self):\n return self.trainer.stats if self.trainer else None\n\n\nclass newResnetS11(Net):\n\n def __init__(self, name='Model', dropout_value=0):\n super(newResnetS11, self).__init__(name)\n self.prepLayer = self.create_conv2d(3, 64, dropout=dropout_value)\n self.layer1Conv1 = self.create_conv2d(64, 128, dropout=\n dropout_value, max_pooling=1)\n self.layer1resnetBlock1 = self.resnetBlock(128, 128)\n self.layer2Conv1 = self.create_conv2d(128, 256, dropout=\n dropout_value, max_pooling=1)\n self.layer3Conv1 = self.create_conv2d(256, 512, dropout=\n dropout_value, max_pooling=1)\n self.layer3resnetBlock1 = self.resnetBlock(512, 512)\n self.maxpool = nn.MaxPool2d(4, 1)\n self.fc_layer = self.create_conv2d(512, 10, kernel_size=(1, 1),\n padding=0, bn=False, relu=False)\n\n def resnetBlock(self, in_channels, out_channels):\n l = []\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n l.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1,\n bias=False))\n 
l.append(nn.BatchNorm2d(out_channels))\n l.append(nn.ReLU())\n return nn.Sequential(*l)\n\n def forward(self, x):\n x = self.prepLayer(x)\n x = self.layer1Conv1(x)\n r1 = self.layer1resnetBlock1(x)\n x = torch.add(x, r1)\n x = self.layer2Conv1(x)\n x = self.layer3Conv1(x)\n r2 = self.layer3resnetBlock1(x)\n x = torch.add(x, r2)\n x = self.maxpool(x)\n x = self.fc_layer(x)\n x = x.view(-1, 10)\n return F.log_softmax(x, dim=-1)\n",
"step-5": "from torchsummary import summary\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom eva4modeltrainer import ModelTrainer\r\n\r\nclass Net(nn.Module):\r\n \"\"\"\r\n Base network that defines helper functions, summary and mapping to device\r\n \"\"\"\r\n def conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, groups=1, padding=1, bias=False, padding_mode=\"zeros\"):\r\n return [nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, groups=groups, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode)]\r\n\r\n def separable_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, padding=1, bias=False, padding_mode=\"zeros\"):\r\n return [nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, groups=in_channels, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode),\r\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1,1), bias=bias)]\r\n\r\n def activate(self, l, out_channels, bn=True, dropout=0, relu=True,max_pooling=0):\r\n if(max_pooling>0):\r\n l.append(nn.MaxPool2d(2,2))\r\n if bn:\r\n l.append(nn.BatchNorm2d(out_channels))\r\n if dropout>0:\r\n l.append(nn.Dropout(dropout))\r\n if relu:\r\n l.append(nn.ReLU())\r\n\r\n return nn.Sequential(*l)\r\n\r\n def create_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, groups=1, padding=1, bias=False, bn=True, dropout=0, relu=True, padding_mode=\"zeros\",max_pooling=0):\r\n return self.activate(self.conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, groups=groups, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode), out_channels, bn, dropout, relu,max_pooling)\r\n\r\n def create_depthwise_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, padding=1, bias=False, bn=True, dropout=0, relu=True, padding_mode=\"zeros\"):\r\n return self.activate(self.separable_conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, padding=padding, bias=bias, padding_mode=padding_mode),\r\n out_channels, bn, dropout, relu)\r\n\r\n def __init__(self, name=\"Model\"):\r\n super(Net, self).__init__()\r\n self.trainer = None\r\n self.name = name\r\n\r\n def summary(self, input_size): #input_size=(1, 28, 28)\r\n summary(self, input_size=input_size)\r\n\r\n def gotrain(self, optimizer, train_loader, test_loader, epochs, statspath, scheduler=None, batch_scheduler=False, L1lambda=0):\r\n self.trainer = ModelTrainer(self, optimizer, train_loader, test_loader, statspath, scheduler, batch_scheduler, L1lambda)\r\n self.trainer.run(epochs)\r\n\r\n def stats(self):\r\n return self.trainer.stats if self.trainer else None\r\n\r\n\r\n\r\n#implementation of the new resnet model\r\nclass newResnetS11(Net):\r\n def __init__(self,name=\"Model\",dropout_value=0):\r\n super(newResnetS11,self).__init__(name)\r\n self.prepLayer=self.create_conv2d(3, 64, dropout=dropout_value)\r\n #layer1\r\n self.layer1Conv1=self.create_conv2d(64,128, dropout=dropout_value,max_pooling=1)\r\n self.layer1resnetBlock1=self.resnetBlock(128,128)\r\n #layer2\r\n self.layer2Conv1=self.create_conv2d(128,256, dropout=dropout_value,max_pooling=1)\r\n #layer3\r\n self.layer3Conv1=self.create_conv2d(256,512, dropout=dropout_value,max_pooling=1)\r\n self.layer3resnetBlock1=self.resnetBlock(512,512)\r\n #ending layer or layer-4\r\n self.maxpool=nn.MaxPool2d(4,1)\r\n 
self.fc_layer=self.create_conv2d(512, 10, kernel_size=(1,1), padding=0, bn=False, relu=False)\r\n def resnetBlock(self,in_channels, out_channels):\r\n l=[]\r\n l.append(nn.Conv2d(in_channels,out_channels,(3,3),padding=1,bias=False))\r\n l.append(nn.BatchNorm2d(out_channels))\r\n l.append(nn.ReLU())\r\n l.append(nn.Conv2d(in_channels,out_channels,(3,3),padding=1,bias=False))\r\n l.append(nn.BatchNorm2d(out_channels))\r\n l.append(nn.ReLU())\r\n return nn.Sequential(*l)\r\n\r\n def forward(self,x):\r\n #prepLayer\r\n x=self.prepLayer(x)\r\n #Layer1\r\n x=self.layer1Conv1(x)\r\n r1=self.layer1resnetBlock1(x)\r\n x=torch.add(x,r1)\r\n #layer2\r\n x=self.layer2Conv1(x)\r\n #layer3\r\n x=self.layer3Conv1(x)\r\n r2=self.layer3resnetBlock1(x)\r\n x=torch.add(x,r2)\r\n #layer4 or ending layer\r\n x=self.maxpool(x)\r\n x=self.fc_layer(x)\r\n x=x.view(-1,10)\r\n return F.log_softmax(x,dim=-1)\r\n",
"step-ids": [
6,
7,
11,
14,
17
]
}
|
[
6,
7,
11,
14,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run(*args, **kwargs):
text = ' '.join(map(str, args))
espeak.synth(text)
<|reserved_special_token_1|>
try:
from espeak import espeak
except ImportError:
class espeak:
@classmethod
def synth(*args):
print(
'Cannot generate speech. Please, install python3-espeak module.'
)
return 1
def run(*args, **kwargs):
text = ' '.join(map(str, args))
espeak.synth(text)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
from espeak import espeak
except ImportError:
class espeak():
@classmethod
def synth(*args):
print('Cannot generate speech. Please, install python3-espeak module.')
return 1
def run(*args, **kwargs):
text = ' '.join(map(str, args))
espeak.synth(text)
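# Example usage (an added sketch): thanks to the ImportError fallback above,
# this runs even without the python3-espeak bindings -- the stub class then
# just prints the warning instead of speaking.
if __name__ == '__main__':
    run('Hello', 'world')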
|
flexible
|
{
"blob_id": "cd5929496b13dd0d5f5ca97500c5bb3572907cc5",
"index": 2769,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(*args, **kwargs):\n text = ' '.join(map(str, args))\n espeak.synth(text)\n",
"step-3": "try:\n from espeak import espeak\nexcept ImportError:\n\n\n class espeak:\n\n @classmethod\n def synth(*args):\n print(\n 'Cannot generate speech. Please, install python3-espeak module.'\n )\n return 1\n\n\ndef run(*args, **kwargs):\n text = ' '.join(map(str, args))\n espeak.synth(text)\n",
"step-4": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ntry:\n from espeak import espeak\nexcept ImportError:\n class espeak():\n @classmethod\n def synth(*args):\n print('Cannot generate speech. Please, install python3-espeak module.')\n return 1\n\n\ndef run(*args, **kwargs):\n text = ' '.join(map(str, args))\n espeak.synth(text)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMixin(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMixin(TestCase):
@classmethod
def setUpClass(cls):
cls.base_dir = os.path.dirname(os.path.abspath(__file__))
cls.fixtures_dir = os.path.join(cls.base_dir, 'fixtures')
cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')
cls.fastq_10xv2_paths = [os.path.join(cls.fixtures_dir,
'10xv2_1.fastq.gz'), os.path.join(cls.fixtures_dir,
'10xv2_2.fastq.gz')]
<|reserved_special_token_1|>
import os
from unittest import TestCase
class TestMixin(TestCase):
@classmethod
def setUpClass(cls):
cls.base_dir = os.path.dirname(os.path.abspath(__file__))
cls.fixtures_dir = os.path.join(cls.base_dir, 'fixtures')
cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')
cls.fastq_10xv2_paths = [os.path.join(cls.fixtures_dir,
'10xv2_1.fastq.gz'), os.path.join(cls.fixtures_dir,
'10xv2_2.fastq.gz')]
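# A hypothetical example (not in the original) of how the mixin is used:
# concrete TestCase subclasses inherit the fixture paths from setUpClass.
class TestFixturePaths(TestMixin):
    def test_paths_are_absolute(self):
        self.assertTrue(os.path.isabs(self.bam_10xv2_path))
        for path in self.fastq_10xv2_paths:
            self.assertTrue(os.path.isabs(path))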
|
flexible
|
{
"blob_id": "268a8252f74a2bdafdadae488f98997c91f5607c",
"index": 2686,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMixin(TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMixin(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.base_dir = os.path.dirname(os.path.abspath(__file__))\n cls.fixtures_dir = os.path.join(cls.base_dir, 'fixtures')\n cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')\n cls.fastq_10xv2_paths = [os.path.join(cls.fixtures_dir,\n '10xv2_1.fastq.gz'), os.path.join(cls.fixtures_dir,\n '10xv2_2.fastq.gz')]\n",
"step-4": "import os\nfrom unittest import TestCase\n\n\nclass TestMixin(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.base_dir = os.path.dirname(os.path.abspath(__file__))\n cls.fixtures_dir = os.path.join(cls.base_dir, 'fixtures')\n cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')\n cls.fastq_10xv2_paths = [os.path.join(cls.fixtures_dir,\n '10xv2_1.fastq.gz'), os.path.join(cls.fixtures_dir,\n '10xv2_2.fastq.gz')]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
Test that EmailUser.objects._create_user without email raise an
ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
<|reserved_special_token_0|>
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from unittest.mock import patch
except ImportError:
from mock import patch
<|reserved_special_token_0|>
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
<|reserved_special_token_0|>
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
Test that EmailUser.objects._create_user without email raise an
ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from unittest.mock import patch
except ImportError:
from mock import patch
<|reserved_special_token_0|>
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
<|reserved_special_token_0|>
fake_now = datetime(2015, 9, 10)
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
Test that EmailUser.objects._create_user without email raise an
ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
<|reserved_special_token_1|>
from __future__ import unicode_literals, absolute_import
from datetime import datetime
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import pytest
from django.test import TestCase
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
from django.utils import timezone
from custom_email_user.models import EmailUser
from custom_email_user.managers import EmailUserManager
fake_now = datetime(2015, 9, 10)
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
Test that EmailUser.objects._create_user without email raise an
ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(self,
mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(self.email, self.password,
True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(self.email, self.password,
True, True)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from datetime import datetime
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import pytest
from django.test import TestCase
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
from django.utils import timezone
from custom_email_user.models import EmailUser
from custom_email_user.managers import EmailUserManager
fake_now = datetime(2015, 9, 10)
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
def setUp(self):
self.email = '[email protected]'
self.password = 'default'
def test_private_create_user_without_email(self):
"""
Test that EmailUser.objects._create_user without email raise an
ValueError exception
"""
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user(None, None, False, False)
self.assertIn('email must be set', str(exinfo.value))
@patch.object(timezone, 'now', return_value=fake_now)
def test_private_create_user_its_ok(self, mock_now):
user = EmailUser.objects._create_user(self.email, self.password,
True, False)
self.assertTrue(isinstance(user, EmailUser))
self.assertIsNotNone(user.pk)
self.assertEqual(user.email, self.email)
self.assertEqual(user.date_joined, fake_now)
self.assertEqual(user.last_login, fake_now)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertTrue(user.check_password(self.password))
def test_private_create_user_with_wrong_email(self):
with pytest.raises(ValueError) as exinfo:
EmailUser.objects._create_user('wrong@example', None, False, False)
self.assertIn('email must be a valid email', str(exinfo.value))
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_without_staff(
self, mock_create_user):
EmailUser.objects.create_user(self.email, self.password)
mock_create_user.assert_called_once_with(
self.email, self.password, False, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_user_call_private_create_user_with_staff(
self, mock_create_user):
EmailUser.objects.create_user(self.email, self.password, True)
mock_create_user.assert_called_once_with(
self.email, self.password, True, False)
@patch.object(EmailUserManager, '_create_user')
def test_create_superuser_call_private_create_user(self, mock_create_user):
EmailUser.objects.create_superuser(self.email, self.password)
mock_create_user.assert_called_once_with(
self.email, self.password, True, True)
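# Note (added comment): USE_TZ is disabled by the class decorator so the
# naive `fake_now` patched into timezone.now() can be stored in `date_joined`
# and `last_login` and compared without naive/aware datetime mismatches.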
|
flexible
|
{
"blob_id": "71f9d9d7973809654db3ea613073f2d431f2d65f",
"index": 1510,
"step-1": "<mask token>\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n <mask token>\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-2": "<mask token>\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n<mask token>\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\n<mask token>\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-3": "<mask token>\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n<mask token>\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\n<mask token>\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-4": "from __future__ import unicode_literals, absolute_import\nfrom datetime import datetime\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\nimport pytest\nfrom django.test import TestCase\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\nfrom django.utils import timezone\nfrom custom_email_user.models import EmailUser\nfrom custom_email_user.managers import EmailUserManager\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nfrom datetime import datetime\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\nimport pytest\n\nfrom django.test import TestCase\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\nfrom django.utils import timezone\n\nfrom custom_email_user.models import EmailUser\nfrom custom_email_user.managers import EmailUserManager\n\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password,\n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(\n self, mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(\n self.email, self.password, False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(\n self, mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(\n self.email, self.password, True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(\n self.email, self.password, True, True)\n\n\n",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def get_features_da(config, sent_dict):
features = []
if len(config[0]) > 0:
top = config[0][-1]
top_stk_token_feature = 'TOP_STK_TOKEN_' + str(sent_dict['FORM'][
top].lower())
features.append(top_stk_token_feature)
top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower())
features.append(top_stk_lemma)
top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top
].lower())
features.append(top_stk_cpostag)
if len(config[1]) > 0:
top_buffer = config[1][-1]
top_buffer_token_feature = 'TOP_BUFFER_TOKEN' + str(sent_dict[
'FORM'][top_buffer].lower())
features.append(top_buffer_token_feature)
top_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][
top_buffer].lower())
features.append(top_buffer_lemma)
top_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict[
'CPOSTAG'][top_buffer].lower())
features.append(top_buffer_cpostag)
if len(config[0]) > 1:
two = config[0][-2]
two_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two
].lower())
features.append(two_stk_cpostag)
if len(config[1]) > 1:
two_buffer = config[1][-2]
two_buffer_token = 'TWO_BUFFER_TOKEN_' + str(sent_dict['FORM'][
two_buffer].lower())
features.append(two_buffer_token)
two_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict[
'CPOSTAG'][two_buffer].lower())
features.append(two_buffer_cpostag)
if len(config[1]) > 2:
three_buffer = config[1][-3]
three_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict[
'CPOSTAG'][three_buffer].lower())
features.append(three_buffer_cpostag)
if len(config[1]) > 3:
four_buffer = config[1][-4]
four_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict[
'CPOSTAG'][four_buffer].lower())
features.append(four_buffer_cpostag)
return features
<|reserved_special_token_1|>
# config {stack,buffer,label}
def get_features_da(config,sent_dict):
features = []
# TODO Improve Features
if len(config[0]) > 0:
# Top of stack.
top = config[0][-1]
top_stk_token_feature = 'TOP_STK_TOKEN_'+str(sent_dict['FORM'][top].lower())
features.append(top_stk_token_feature)
top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower()) # not converting to lower has helped to increase the f1 score slightly
features.append(top_stk_lemma)
top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top].lower())
features.append(top_stk_cpostag)
if len(config[1]) > 0:
top_buffer = config[1][-1] # top of buffer, since it is in descending order
top_buffer_token_feature = 'TOP_BUFFER_TOKEN'+str(sent_dict['FORM'][top_buffer].lower())
features.append(top_buffer_token_feature)
top_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][top_buffer].lower())
features.append(top_buffer_lemma)
top_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][top_buffer].lower())
features.append(top_buffer_cpostag)
if len(config[0]) > 1:
two = config[0][-2] # 2nd from top in stack
# two_stk_token = 'two_stk_token_'+str(sent_dict['FORM'][two].lower())
# features.append(two_stk_token)
# two_stk_lemma = 'TWO_STK_LEMMA_' + str(sent_dict['LEMMA'][two].lower())
# features.append(two_stk_lemma)
two_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two].lower())
features.append(two_stk_cpostag)
if len(config[1]) > 1:
two_buffer = config[1][-2] # 2nd from top in buffer
two_buffer_token = 'TWO_BUFFER_TOKEN_'+str(sent_dict['FORM'][two_buffer].lower())
features.append(two_buffer_token)
# two_buffer_lemma = 'TWO_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][two_buffer])
# features.append(two_buffer_lemma)
two_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][two_buffer].lower())
features.append(two_buffer_cpostag)
# if len(config[0]) > 2:
# three = config[0][-3] # 3rd from top in stack
# three_stk_lemma = 'THREE_STACK_LEMMA_' + str(sent_dict['LEMMA'][three])
# features.append(three_stk_lemma)
# three_stk_cpostag = 'THREE_STACK_CPOSTAG_' + str(sent_dict['CPOSTAG'][three].lower())
# features.append(three_stk_cpostag)
if len(config[1]) > 2:
three_buffer = config[1][-3] # 3rd from top in buffer
# three_buffer_lemma = 'THREE_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][three_buffer].lower())
# features.append(three_buffer_lemma)
three_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][three_buffer].lower())
features.append(three_buffer_cpostag)
# if len(config[0]) > 3:
# four = config[0][-4] # 4th from top in stack
# four_stk_lemma = 'FOUR_STK_LEMMA_' + str(sent_dict['LEMMA'][four].lower())
# features.append(four_stk_lemma)
# four_stk_cpostag = 'FOUR_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][four].lower())
# features.append(four_stk_cpostag)
if len(config[1]) > 3:
four_buffer = config[1][-4] # 4th from top in buffer
# four_buffer_lemma = 'FOUR_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][four_buffer].lower())
# features.append(four_buffer_lemma)
four_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][four_buffer].lower())
features.append(four_buffer_cpostag)
return features
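# Worked example (an added sketch): a toy transition-parser configuration in
# the shape the extractor expects -- config is (stack, buffer, arcs) holding
# token indices with the top of each list at the end, and sent_dict maps
# CoNLL column names to per-token lists.
if __name__ == '__main__':
    sent_dict = {
        'FORM': ['ROOT', 'Economic', 'news', 'had', 'little', 'effect'],
        'LEMMA': ['ROOT', 'economic', 'news', 'have', 'little', 'effect'],
        'CPOSTAG': ['ROOT', 'ADJ', 'NOUN', 'VERB', 'ADJ', 'NOUN'],
    }
    config = ([0, 2], [5, 4, 3], [])  # stack=[ROOT, news]; buffer top is 'had'
    for feature in get_features_da(config, sent_dict):
        print(feature)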
|
flexible
|
{
"blob_id": "e0ce8a8ad9c842b013bbb1ea1c585b6c4c2a68f5",
"index": 2868,
"step-1": "<mask token>\n",
"step-2": "def get_features_da(config, sent_dict):\n features = []\n if len(config[0]) > 0:\n top = config[0][-1]\n top_stk_token_feature = 'TOP_STK_TOKEN_' + str(sent_dict['FORM'][\n top].lower())\n features.append(top_stk_token_feature)\n top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower())\n features.append(top_stk_lemma)\n top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top\n ].lower())\n features.append(top_stk_cpostag)\n if len(config[1]) > 0:\n top_buffer = config[1][-1]\n top_buffer_token_feature = 'TOP_BUFFER_TOKEN' + str(sent_dict[\n 'FORM'][top_buffer].lower())\n features.append(top_buffer_token_feature)\n top_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][\n top_buffer].lower())\n features.append(top_buffer_lemma)\n top_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][top_buffer].lower())\n features.append(top_buffer_cpostag)\n if len(config[0]) > 1:\n two = config[0][-2]\n two_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two\n ].lower())\n features.append(two_stk_cpostag)\n if len(config[1]) > 1:\n two_buffer = config[1][-2]\n two_buffer_token = 'TWO_BUFFER_TOKEN_' + str(sent_dict['FORM'][\n two_buffer].lower())\n features.append(two_buffer_token)\n two_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][two_buffer].lower())\n features.append(two_buffer_cpostag)\n if len(config[1]) > 2:\n three_buffer = config[1][-3]\n three_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][three_buffer].lower())\n features.append(three_buffer_cpostag)\n if len(config[1]) > 3:\n four_buffer = config[1][-4]\n four_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][four_buffer].lower())\n features.append(four_buffer_cpostag)\n return features\n",
"step-3": "# config {stack,buffer,label}\ndef get_features_da(config,sent_dict):\n features = []\n\n # TODO Improve Features\n \n if len(config[0]) > 0:\n # Top of stack.\n top = config[0][-1] \n \n top_stk_token_feature = 'TOP_STK_TOKEN_'+str(sent_dict['FORM'][top].lower())\n features.append(top_stk_token_feature)\n\t\n top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower()) # not converting to lower has helped to increase the f1 score slightly\n features.append(top_stk_lemma)\n\n top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top].lower())\n features.append(top_stk_cpostag)\n\t\n \n if len(config[1]) > 0:\n \ttop_buffer = config[1][-1] # top of buffer, since it is in descending order\n\n \ttop_buffer_token_feature = 'TOP_BUFFER_TOKEN'+str(sent_dict['FORM'][top_buffer].lower())\n \tfeatures.append(top_buffer_token_feature)\n\n \ttop_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][top_buffer].lower())\n \tfeatures.append(top_buffer_lemma)\n\n \ttop_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][top_buffer].lower())\n \tfeatures.append(top_buffer_cpostag)\n\t\n\n if len(config[0]) > 1:\n \ttwo = config[0][-2] # 2nd from top in stack\n \t\n \t# two_stk_token = 'two_stk_token_'+str(sent_dict['FORM'][two].lower())\n \t# features.append(two_stk_token)\n\n \t# two_stk_lemma = 'TWO_STK_LEMMA_' + str(sent_dict['LEMMA'][two].lower())\n \t# features.append(two_stk_lemma)\n\n \ttwo_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two].lower())\n \tfeatures.append(two_stk_cpostag)\n\n if len(config[1]) > 1:\n \ttwo_buffer = config[1][-2] # 2nd from top in buffer\n\n \ttwo_buffer_token = 'TWO_BUFFER_TOKEN_'+str(sent_dict['FORM'][two_buffer].lower())\n \tfeatures.append(two_buffer_token)\n\n \t# two_buffer_lemma = 'TWO_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][two_buffer])\n \t# features.append(two_buffer_lemma)\n\n \ttwo_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][two_buffer].lower())\n \tfeatures.append(two_buffer_cpostag)\n\t\n\n # if len(config[0]) > 2:\n # \tthree = config[0][-3] # 3rd from top in stack\n\n # \tthree_stk_lemma = 'THREE_STACK_LEMMA_' + str(sent_dict['LEMMA'][three])\n # \tfeatures.append(three_stk_lemma)\n\n # \tthree_stk_cpostag = 'THREE_STACK_CPOSTAG_' + str(sent_dict['CPOSTAG'][three].lower())\n # \tfeatures.append(three_stk_cpostag)\n\n if len(config[1]) > 2:\n \tthree_buffer = config[1][-3] # 3rd from top in buffer\n\n \t# three_buffer_lemma = 'THREE_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][three_buffer].lower())\n \t# features.append(three_buffer_lemma)\n\n \tthree_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][three_buffer].lower())\n \tfeatures.append(three_buffer_cpostag)\n\n # if len(config[0]) > 3:\n # \tfour = config[0][-4] # 4th from top in stack\n\n # \tfour_stk_lemma = 'FOUR_STK_LEMMA_' + str(sent_dict['LEMMA'][four].lower())\n # \tfeatures.append(four_stk_lemma)\n\n # \tfour_stk_cpostag = 'FOUR_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][four].lower())\n # \tfeatures.append(four_stk_cpostag)\n\n if len(config[1]) > 3:\n \tfour_buffer = config[1][-4] # 4th from top in buffer\n\n \t# four_buffer_lemma = 'FOUR_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][four_buffer].lower())\n \t# features.append(four_buffer_lemma)\n\n \tfour_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][four_buffer].lower())\n \tfeatures.append(four_buffer_cpostag)\n\n\n return features\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
''' export_claims -- export claims in CSV format
https://sfreeclaims.anvicare.com/docs/forms/Reference-CSV%20Specifications.txt
'''
import csv
from itertools import groupby
from operator import itemgetter
import wsgiref.handlers
from urlparse import parse_qs  # Python 2 stdlib; used to recover visit ids
import MySQLdb
import ocap
from hhtcb import Xataface, WSGI
def cgi_main(xf, cal):
app = ReportApp(xf, cal)
wsgiref.handlers.CGIHandler().run(app)
def _test_main(argv, xf):
outfn, visits = argv[1], argv[2:]
host, user, password, name = xf.dbopts()
content, pages = format_claims(MySQLdb.connect(host=host, user=user,
passwd=password, db=name),
map(int, visits))
outfp = open(outfn, 'w')
for part in content:
outfp.write(part)
    print(pages)
class ReportApp(object):
def __init__(self, xf, cal):
self._xf = xf
self._datesrc = cal
def __call__(self, env, start_response):
try:
host, user, password, name = self._xf.webapp_login(env)
except KeyError:
start_response('400 bad request', WSGI.PLAIN)
return ['missing key parameter ']
except OSError:
start_response('401 unauthorized', WSGI.PLAIN)
return ['report key does not match.']
conn = MySQLdb.connect(host=host, user=user, passwd=password, db=name)
start_response('200 ok',
[('content-type', 'text/plain'),
('Content-Disposition',
'attachment; filename=claims-%s.csv'
% self._datesrc.today())])
content, pages = format_claims(conn)
return content
def format_claims(conn, visit_ids):
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(QUERY % dict(
visit_ids=', '.join([str(i) for i in visit_ids])))
pages = []
buf = ListWriter()
out = csv.DictWriter(buf, COLUMNS, quoting=csv.QUOTE_ALL)
out.writerow(dict(zip(COLUMNS, COLUMNS)))
for client_id, records in by_page(groupby(cursor.fetchall(),
itemgetter('client_id')),
pg_size=6):
claim = records[0]
tot = claim['28-TotalCharge']
for idx in range(1, len(records)):
for k, v in records[idx].items():
if k.startswith('24.'):
kk = k.replace('.1.', '.%d.' % (idx + 1))
claim[kk] = v
if k == '24.1.f-Charges':
tot += v
claim['28-TotalCharge'] = tot
# is there ever an amount paid?
claim['30-BalanceDue'] = tot
#import pprint
#pprint.pprint(claim)
visit_ids = [r['visit_id'] for r in records]
pages.append(dict(client_id=client_id,
total=tot,
visit_ids=visit_ids,
items=records,
detail=claim))
del claim['client_id']
del claim['visit_id']
out.writerow(claim)
return buf.parts, pages
def by_page(record_groups, pg_size):
for k, group in record_groups:
gl = list(group)
offset = 0
while offset < len(gl):
yield k, gl[offset:offset + pg_size]
offset += pg_size
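# A quick sketch of how by_page chunks one client's visit records into
# claim pages of at most pg_size service lines (names below are made up):
#
#   >>> groups = [('c1', iter(range(14)))]
#   >>> [(k, len(pg)) for k, pg in by_page(groups, pg_size=6)]
#   [('c1', 6), ('c1', 6), ('c1', 2)]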
class ListWriter(object):
def __init__(self):
self.parts = []
def write(self, txt):
self.parts.append(txt)
QUERY = r'''
select c.id client_id, v.id visit_id
, co.name as `Insurance Company Name`
, co.address `Insurance Company Address 1`
, co.city_st_zip `Insurance Company Address 2`
, ins.payer_type `1-InsuredPlanName`
, ins.id_number `1a-InsuredIDNo`
, c.name as `2-PatientName`
, date_format(c.DOB, '%%m/%%d/%%Y') as `3-PatientDOB`
, ins.patient_sex `3-PatientGender`
, case when upper(ins.patient_rel) = 'SELF'
then c.name
else ins.insured_name end `4-InsuredName`
, c.address `5-PatientAddress`
, c.city `5-PatientCity`
, c.state `5-PatientState`
, c.zip `5-PatientZip`
, c.phone `5-PatientPhone`
, upper(ins.patient_rel) `6-PatientRel`
, case when upper(ins.patient_rel) = 'SELF'
then c.address
else ins.insured_address end `7-InsuredAddr`
, case when upper(ins.patient_rel) = 'SELF'
then c.city
else ins.insured_city end `7-InsAddCity`
, case when upper(ins.patient_rel) = 'SELF'
then c.state
else ins.insured_state end `7-InsAddState`
, case when upper(ins.patient_rel) = 'SELF'
then c.zip
else ins.insured_zip end `7-InsAddZip`
, case when upper(ins.patient_rel) = 'SELF'
then c.phone
else ins.insured_phone end `7-InsAddPhone`
, ins.patient_status `8-MaritalStatus`
, ins.patient_status2 `8-Employed?`
, 'NO' as `10a-CondEmployment`
, 'NO' as `10b-CondAutoAccident`
, 'NO' as `10c-CondOtherAccident`
, ins.insured_policy `11-InsuredGroupNo`
, date_format(case when upper(ins.patient_rel) = 'SELF'
then c.dob
else ins.insured_dob end, '%%m/%%d/%%Y') `11a-InsuredsDOB`
, case when upper(ins.patient_rel) = 'SELF'
then ins.patient_sex
else ins.insured_sex end `11a-InsuredsGender`
, 'Signature on file' `12-PatientSign`
, date_format(current_date, '%%m/%%d/%%Y') `12-Date`
, 'Signature on file' as `13-AuthSign`
, 'NO' as `20-OutsideLab`
, '0.00' as `20-Charges`
, ins.dx1 `21.1-Diagnosis`
, ins.dx2 `21.2-Diagnosis`
, ins.approval `23-PriorAuth`
, date_format(s.session_date, '%%m/%%d/%%Y') `24.1.a-DOSFrom`
, date_format(s.session_date, '%%m/%%d/%%Y') `24.1.a-DOSTo`
, v.cpt as `24.1.d-CPT`
, '11' as `24.1.b-Place`
, 1 as `24.1.e-Code`
, p.price `24.1.f-Charges`
, 1 as `24.1.g-Units`
, bp.npi `24.1.j-ProvNPI`
, bp.tax_id `25-TaxID`
, 'SSN' as `25-SSN/EIN`
, concat(upper(substr(c.name, 1, 3)), '.',
upper(substr(c.name, instr(c.name, ',') + 2, 3)), '.',
convert(c.id, char)) as `26-PatientAcctNo`
, 'Y' as `27-AcceptAssign`
, p.price as `28-TotalCharge`
, 0 `29-AmountPaid`
, p.price as `30-BalanceDue`
, bp.name as `31-PhysicianSignature`
, date_format(current_date, '%%m/%%d/%%Y') `31-Date`
, bp.name `33-ClinicName`
, bp.address as `33-ClinicAddressLine1`
, bp.city_st_zip as `33-ClinicCityStateZip`
, bp.npi as `33-a-NPI`
from Insurance ins
join Client c on ins.Client_id = c.id
join Carrier co on ins.Carrier_id = co.id
join Visit v on v.Client_id = c.id
join `Procedure` p on p.cpt = v.cpt
join `Session` s on v.Session_id = s.id
join `Group` g on s.Group_id = g.id
join Therapist as bp on bp.tax_id is not null
where v.bill_date is null and v.check_date is null
and v.id in (%(visit_ids)s)
order by c.name, c.id, s.session_date, v.id
'''
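# For reference, format_claims fills the %(visit_ids)s placeholder above by
# plain string interpolation; e.g. (visit ids below are made up):
#
#   QUERY % dict(visit_ids='101, 102, 103')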
COLUMNS = [literal.strip()[1:-1] for literal in '''
"Insurance Company Name"
"Insurance Company Name 2"
"Insurance Company Address 1"
"Insurance Company Address 2"
"1-InsuredPlanName"
"1a-InsuredIDNo"
"2-PatientName"
"3-PatientDOB"
"3-PatientGender"
"4-InsuredName"
"5-PatientAddress"
"5-PatientCity"
"5-PatientState"
"5-PatientZip"
"5-PatientPhone"
"6-PatientRel"
"7-InsuredAddr"
"7-InsAddCity"
"7-InsAddState"
"7-InsAddZip"
"7-InsAddPhone"
"8-MaritalStatus"
"8-Employed?"
"9-InsuredName2"
"9a-InsuredGroupNo2"
"9b-Insureds2DOB"
"9b-Insureds2Gender"
"9c-EmployerName"
"9d-InsuredPlanName2"
"10a-CondEmployment"
"10b-CondAutoAccident"
"10c-CondOtherAccident"
"10b2-AccidentState"
"10d-LocalUse"
"11-InsuredGroupNo"
"11a-InsuredsDOB"
"11a-InsuredsGender"
"11b-EmployerName"
"11c-InsuredPlanName"
"11d-OtherHealthPlan"
"12-PatientSign"
"12-Date"
"13-AuthSign"
"14-DateOfCondition"
"15-FirstDateOfCondition"
"16-DateFromNoWork"
"16-DateToNoWork"
"17-ReferringPhysician"
"17a-PhysicianNo"
"17b-ReferNPI"
"18-DateFromHosp"
"18-DateToHosp"
"19-LocalUse"
"20-OutsideLab"
"20-Charges"
"21.1-Diagnosis"
"21.2-Diagnosis"
"21.3-Diagnosis"
"21.4-Diagnosis"
"22-MedicaidResubmissionCode"
"22-MedicaidResubmissionRefNo"
"23-PriorAuth"
"24.1.a-DOSFrom"
"24.1.a-DOSTo"
"24.1.b-Place"
"24.1.c-EMG"
"24.1.d-CPT"
"24.1.d-Modifier"
"24.1.e-Code"
"24.1.f-Charges"
"24.1.g-Units"
"24.1.h-Epsot"
"24.1.i-Qualifier"
"24.1.j-ProvLegacyNo"
"24.1.j-ProvNPI"
"24.2.a-DOSFrom"
"24.2.a-DOSTo"
"24.2.b-Place"
"24.2.c-EMG"
"24.2.d-CPT"
"24.2.d-Modifier"
"24.2.e-Code"
"24.2.f-Charges"
"24.2.g-Units"
"24.2.h-Epsot"
"24.2.i-Qualifier"
"24.2.j-ProvLegacyNo"
"24.2.j-ProvNPI"
"24.3.a-DOSFrom"
"24.3.a-DOSTo"
"24.3.b-Place"
"24.3.c-EMG"
"24.3.d-CPT"
"24.3.d-Modifier"
"24.3.e-Code"
"24.3.f-Charges"
"24.3.g-Units"
"24.3.h-Epsot"
"24.3.i-Qualifier"
"24.3.j-ProvLegacyNo"
"24.3.j-ProvNPI"
"24.4.a-DOSFrom"
"24.4.a-DOSTo"
"24.4.b-Place"
"24.4.c-EMG"
"24.4.d-CPT"
"24.4.d-Modifier"
"24.4.e-Code"
"24.4.f-Charges"
"24.4.g-Units"
"24.4.h-Epsot"
"24.4.i-Qualifier"
"24.4.j-ProvLegacyNo"
"24.4.j-ProvNPI"
"24.5.a-DOSFrom"
"24.5.a-DOSTo"
"24.5.b-Place"
"24.5.c-EMG"
"24.5.d-CPT"
"24.5.d-Modifier"
"24.5.e-Code"
"24.5.f-Charges"
"24.5.g-Units"
"24.5.h-Epsot"
"24.5.i-Qualifier"
"24.5.j-ProvLegacyNo"
"24.5.j-ProvNPI"
"24.6.a-DOSFrom"
"24.6.a-DOSTo"
"24.6.b-Place"
"24.6.c-EMG"
"24.6.d-CPT"
"24.6.d-Modifier"
"24.6.e-Code"
"24.6.f-Charges"
"24.6.g-Units"
"24.6.h-Epsot"
"24.6.i-Qualifier"
"24.6.j-ProvLegacyNo"
"24.6.j-ProvNPI"
"25-TaxID"
"25-SSN/EIN"
"26-PatientAcctNo"
"27-AcceptAssign"
"28-TotalCharge"
"29-AmountPaid"
"30-BalanceDue"
"31-PhysicianSignature"
"31-Date"
"32-FacilityName"
"32-FacilityAddressLine1"
"32-FacilityAddressLine2"
"32-FacilityCityStateZip"
"32-FacilityNPI"
"33-ClinicName"
"33-ClinicAddressLine1"
"33-ClinicAddressLine2"
"33-ClinicCityStateZip"
"33-PIN#"
"33-GRP#"
"33-a-NPI"
"33-b-GrpLegacyNo"
'''.strip().split('\n')]
if __name__ == '__main__':
def _with_caps():
from os import environ, path as os_path
import datetime
here = ocap.Rd(os_path.dirname(__file__), os_path,
open_rd=lambda n: open(n))
xf = Xataface.make(here)
if 'SCRIPT_NAME' in environ:
cgi_main(xf, cal=datetime.date)
else:
from sys import argv
            _test_main(argv, xf)

    _with_caps()
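# Example command-line smoke test via _test_main (output file name and
# visit ids below are made up):
#
#   python export_claims.py claims-out.csv 101 102 103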
|
normal
|
{
"blob_id": "41f70cdfc9cbe5ec4560c1f3271a4636cca06d16",
"index": 3012,
"step-1": "#!/usr/bin/env python\n''' export_claims -- export claims in CSV format\n\nhttps://sfreeclaims.anvicare.com/docs/forms/Reference-CSV%20Specifications.txt\n'''\n\nimport csv\nfrom itertools import groupby\nfrom operator import itemgetter\nimport wsgiref.handlers\n\nimport MySQLdb\n\nimport ocap\nfrom hhtcb import Xataface, WSGI\n\n\ndef cgi_main(xf, cal):\n app = ReportApp(xf, cal)\n wsgiref.handlers.CGIHandler().run(app)\n\n\ndef _test_main(argv, xf):\n outfn, visits = argv[1], argv[2:]\n host, user, password, name = xf.dbopts()\n\n content, pages = format_claims(MySQLdb.connect(host=host, user=user,\n passwd=password, db=name),\n map(int, visits))\n outfp = open(outfn, 'w')\n for part in content:\n outfp.write(part)\n print pages\n\n\nclass ReportApp(object):\n def __init__(self, xf, cal):\n self._xf = xf\n self._datesrc = cal\n\n def __call__(self, env, start_response):\n try:\n host, user, password, name = self._xf.webapp_login(env)\n except KeyError:\n start_response('400 bad request', WSGI.PLAIN)\n return ['missing key parameter ']\n except OSError:\n start_response('401 unauthorized', WSGI.PLAIN)\n return ['report key does not match.']\n\n conn = MySQLdb.connect(host=host, user=user, passwd=password, db=name)\n\n start_response('200 ok',\n [('content-type', 'text/plain'),\n ('Content-Disposition',\n 'attachment; filename=claims-%s.csv'\n % self._datesrc.today())])\n\n content, pages = format_claims(conn)\n return content\n\n\ndef format_claims(conn, visit_ids):\n cursor = conn.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(QUERY % dict(\n visit_ids=', '.join([str(i) for i in visit_ids])))\n\n pages = []\n buf = ListWriter()\n out = csv.DictWriter(buf, COLUMNS, quoting=csv.QUOTE_ALL)\n out.writerow(dict(zip(COLUMNS, COLUMNS)))\n\n for client_id, records in by_page(groupby(cursor.fetchall(),\n itemgetter('client_id')),\n pg_size=6):\n claim = records[0]\n\n tot = claim['28-TotalCharge']\n for idx in range(1, len(records)):\n for k, v in records[idx].items():\n if k.startswith('24.'):\n kk = k.replace('.1.', '.%d.' 
% (idx + 1))\n claim[kk] = v\n if k == '24.1.f-Charges':\n tot += v\n\n claim['28-TotalCharge'] = tot\n # is there ever an amount paid?\n claim['30-BalanceDue'] = tot\n\n #import pprint\n #pprint.pprint(claim)\n visit_ids = [r['visit_id'] for r in records]\n pages.append(dict(client_id=client_id,\n total=tot,\n visit_ids=visit_ids,\n items=records,\n detail=claim))\n del claim['client_id']\n del claim['visit_id']\n out.writerow(claim)\n\n return buf.parts, pages\n\n\ndef by_page(record_groups, pg_size):\n for k, group in record_groups:\n gl = list(group)\n offset = 0\n while offset < len(gl):\n yield k, gl[offset:offset + pg_size]\n offset += pg_size\n\n\nclass ListWriter(object):\n def __init__(self):\n self.parts = []\n\n def write(self, txt):\n self.parts.append(txt)\n\n\nQUERY = r'''\nselect c.id client_id, v.id visit_id\n , co.name as `Insurance Company Name`\n , co.address `Insurance Company Address 1`\n , co.city_st_zip `Insurance Company Address 2`\n , ins.payer_type `1-InsuredPlanName`\n , ins.id_number `1a-InsuredIDNo`\n , c.name as `2-PatientName`\n , date_format(c.DOB, '%%m/%%d/%%Y') as `3-PatientDOB`\n , ins.patient_sex `3-PatientGender`\n , case when upper(ins.patient_rel) = 'SELF'\n then c.name\n else ins.insured_name end `4-InsuredName`\n , c.address `5-PatientAddress`\n , c.city `5-PatientCity`\n , c.state `5-PatientState`\n , c.zip `5-PatientZip`\n , c.phone `5-PatientPhone`\n , upper(ins.patient_rel) `6-PatientRel`\n , case when upper(ins.patient_rel) = 'SELF'\n then c.address\n else ins.insured_address end `7-InsuredAddr`\n , case when upper(ins.patient_rel) = 'SELF'\n then c.city\n else ins.insured_city end `7-InsAddCity`\n , case when upper(ins.patient_rel) = 'SELF'\n then c.state\n else ins.insured_state end `7-InsAddState`\n , case when upper(ins.patient_rel) = 'SELF'\n then c.zip\n else ins.insured_zip end `7-InsAddZip`\n , case when upper(ins.patient_rel) = 'SELF'\n then c.phone\n else ins.insured_phone end `7-InsAddPhone`\n , ins.patient_status `8-MaritalStatus`\n , ins.patient_status2 `8-Employed?`\n , 'NO' as `10a-CondEmployment`\n , 'NO' as `10b-CondAutoAccident`\n , 'NO' as `10c-CondOtherAccident`\n , ins.insured_policy `11-InsuredGroupNo`\n , date_format(case when upper(ins.patient_rel) = 'SELF'\n then c.dob\n else ins.insured_dob end, '%%m/%%d/%%Y') `11a-InsuredsDOB`\n , case when upper(ins.patient_rel) = 'SELF'\n then ins.patient_sex\n else ins.insured_sex end `11a-InsuredsGender`\n , 'Signature on file' `12-PatientSign`\n , date_format(current_date, '%%m/%%d/%%Y') `12-Date`\n , 'Signature on file' as `13-AuthSign`\n , 'NO' as `20-OutsideLab`\n , '0.00' as `20-Charges`\n , ins.dx1 `21.1-Diagnosis`\n , ins.dx2 `21.2-Diagnosis`\n , ins.approval `23-PriorAuth`\n\n , date_format(s.session_date, '%%m/%%d/%%Y') `24.1.a-DOSFrom`\n , date_format(s.session_date, '%%m/%%d/%%Y') `24.1.a-DOSTo`\n , v.cpt as `24.1.d-CPT`\n , '11' as `24.1.b-Place`\n , 1 as `24.1.e-Code`\n , p.price `24.1.f-Charges`\n , 1 as `24.1.g-Units`\n , bp.npi `24.1.j-ProvNPI`\n\n , bp.tax_id `25-TaxID`\n , 'SSN' as `25-SSN/EIN`\n , concat(upper(substr(c.name, 1, 3)), '.',\n upper(substr(c.name, instr(c.name, ',') + 2, 3)), '.',\n convert(c.id, char)) as `26-PatientAcctNo`\n , 'Y' as `27-AcceptAssign`\n , p.price as `28-TotalCharge`\n , 0 `29-AmountPaid`\n , p.price as `30-BalanceDue`\n , bp.name as `31-PhysicianSignature`\n , date_format(current_date, '%%m/%%d/%%Y') `31-Date`\n , bp.name `33-ClinicName`\n , bp.address as `33-ClinicAddressLine1`\n , bp.city_st_zip as `33-ClinicCityStateZip`\n , 
bp.npi as `33-a-NPI`\nfrom Insurance ins\njoin Client c on ins.Client_id = c.id\njoin Carrier co on ins.Carrier_id = co.id\njoin Visit v on v.Client_id = c.id\njoin `Procedure` p on p.cpt = v.cpt\njoin `Session` s on v.Session_id = s.id\njoin `Group` g on s.Group_id = g.id\njoin Therapist as bp on bp.tax_id is not null\nwhere v.bill_date is null and v.check_date is null\nand v.id in (%(visit_ids)s)\norder by c.name, c.id, s.session_date, v.id\n'''\n\nCOLUMNS = [literal.strip()[1:-1] for literal in '''\n \"Insurance Company Name\"\n \"Insurance Company Name 2\"\n \"Insurance Company Address 1\"\n \"Insurance Company Address 2\"\n \"1-InsuredPlanName\"\n \"1a-InsuredIDNo\"\n \"2-PatientName\"\n \"3-PatientDOB\"\n \"3-PatientGender\"\n \"4-InsuredName\"\n \"5-PatientAddress\"\n \"5-PatientCity\"\n \"5-PatientState\"\n \"5-PatientZip\"\n \"5-PatientPhone\"\n \"6-PatientRel\"\n \"7-InsuredAddr\"\n \"7-InsAddCity\"\n \"7-InsAddState\"\n \"7-InsAddZip\"\n \"7-InsAddPhone\"\n \"8-MaritalStatus\"\n \"8-Employed?\"\n \"9-InsuredName2\"\n \"9a-InsuredGroupNo2\"\n \"9b-Insureds2DOB\"\n \"9b-Insureds2Gender\"\n \"9c-EmployerName\"\n \"9d-InsuredPlanName2\"\n \"10a-CondEmployment\"\n \"10b-CondAutoAccident\"\n \"10c-CondOtherAccident\"\n \"10b2-AccidentState\"\n \"10d-LocalUse\"\n \"11-InsuredGroupNo\"\n \"11a-InsuredsDOB\"\n \"11a-InsuredsGender\"\n \"11b-EmployerName\"\n \"11c-InsuredPlanName\"\n \"11d-OtherHealthPlan\"\n \"12-PatientSign\"\n \"12-Date\"\n \"13-AuthSign\"\n \"14-DateOfCondition\"\n \"15-FirstDateOfCondition\"\n \"16-DateFromNoWork\"\n \"16-DateToNoWork\"\n \"17-ReferringPhysician\"\n \"17a-PhysicianNo\"\n \"17b-ReferNPI\"\n \"18-DateFromHosp\"\n \"18-DateToHosp\"\n \"19-LocalUse\"\n \"20-OutsideLab\"\n \"20-Charges\"\n \"21.1-Diagnosis\"\n \"21.2-Diagnosis\"\n \"21.3-Diagnosis\"\n \"21.4-Diagnosis\"\n \"22-MedicaidResubmissionCode\"\n \"22-MedicaidResubmissionRefNo\"\n \"23-PriorAuth\"\n \"24.1.a-DOSFrom\"\n \"24.1.a-DOSTo\"\n \"24.1.b-Place\"\n \"24.1.c-EMG\"\n \"24.1.d-CPT\"\n \"24.1.d-Modifier\"\n \"24.1.e-Code\"\n \"24.1.f-Charges\"\n \"24.1.g-Units\"\n \"24.1.h-Epsot\"\n \"24.1.i-Qualifier\"\n \"24.1.j-ProvLegacyNo\"\n \"24.1.j-ProvNPI\"\n \"24.2.a-DOSFrom\"\n \"24.2.a-DOSTo\"\n \"24.2.b-Place\"\n \"24.2.c-EMG\"\n \"24.2.d-CPT\"\n \"24.2.d-Modifier\"\n \"24.2.e-Code\"\n \"24.2.f-Charges\"\n \"24.2.g-Units\"\n \"24.2.h-Epsot\"\n \"24.2.i-Qualifier\"\n \"24.2.j-ProvLegacyNo\"\n \"24.2.j-ProvNPI\"\n \"24.3.a-DOSFrom\"\n \"24.3.a-DOSTo\"\n \"24.3.b-Place\"\n \"24.3.c-EMG\"\n \"24.3.d-CPT\"\n \"24.3.d-Modifier\"\n \"24.3.e-Code\"\n \"24.3.f-Charges\"\n \"24.3.g-Units\"\n \"24.3.h-Epsot\"\n \"24.3.i-Qualifier\"\n \"24.3.j-ProvLegacyNo\"\n \"24.3.j-ProvNPI\"\n \"24.4.a-DOSFrom\"\n \"24.4.a-DOSTo\"\n \"24.4.b-Place\"\n \"24.4.c-EMG\"\n \"24.4.d-CPT\"\n \"24.4.d-Modifier\"\n \"24.4.e-Code\"\n \"24.4.f-Charges\"\n \"24.4.g-Units\"\n \"24.4.h-Epsot\"\n \"24.4.i-Qualifier\"\n \"24.4.j-ProvLegacyNo\"\n \"24.4.j-ProvNPI\"\n \"24.5.a-DOSFrom\"\n \"24.5.a-DOSTo\"\n \"24.5.b-Place\"\n \"24.5.c-EMG\"\n \"24.5.d-CPT\"\n \"24.5.d-Modifier\"\n \"24.5.e-Code\"\n \"24.5.f-Charges\"\n \"24.5.g-Units\"\n \"24.5.h-Epsot\"\n \"24.5.i-Qualifier\"\n \"24.5.j-ProvLegacyNo\"\n \"24.5.j-ProvNPI\"\n \"24.6.a-DOSFrom\"\n \"24.6.a-DOSTo\"\n \"24.6.b-Place\"\n \"24.6.c-EMG\"\n \"24.6.d-CPT\"\n \"24.6.d-Modifier\"\n \"24.6.e-Code\"\n \"24.6.f-Charges\"\n \"24.6.g-Units\"\n \"24.6.h-Epsot\"\n \"24.6.i-Qualifier\"\n \"24.6.j-ProvLegacyNo\"\n \"24.6.j-ProvNPI\"\n \"25-TaxID\"\n \"25-SSN/EIN\"\n \"26-PatientAcctNo\"\n 
\"27-AcceptAssign\"\n \"28-TotalCharge\"\n \"29-AmountPaid\"\n \"30-BalanceDue\"\n \"31-PhysicianSignature\"\n \"31-Date\"\n \"32-FacilityName\"\n \"32-FacilityAddressLine1\"\n \"32-FacilityAddressLine2\"\n \"32-FacilityCityStateZip\"\n \"32-FacilityNPI\"\n \"33-ClinicName\"\n \"33-ClinicAddressLine1\"\n \"33-ClinicAddressLine2\"\n \"33-ClinicCityStateZip\"\n \"33-PIN#\"\n \"33-GRP#\"\n \"33-a-NPI\"\n \"33-b-GrpLegacyNo\"\n'''.strip().split('\\n')]\n\nif __name__ == '__main__':\n def _with_caps():\n from os import environ, path as os_path\n import datetime\n\n here = ocap.Rd(os_path.dirname(__file__), os_path,\n open_rd=lambda n: open(n))\n xf = Xataface.make(here)\n\n if 'SCRIPT_NAME' in environ:\n cgi_main(xf, cal=datetime.date)\n else:\n from sys import argv\n _test_main(argv, xf)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.open('ECS165')
print(db)
<|reserved_special_token_0|>
print('Merge Start')
q.table.merge(0)
print('Merge End')
db.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = Database()
db.open('ECS165')
print(db)
g_table = db.get_table('Grades')
q = Query(g_table)
print('Merge Start')
q.table.merge(0)
print('Merge End')
db.close()
<|reserved_special_token_1|>
from template.db import Database
from template.query import Query
import os
<|reserved_special_token_0|>
db = Database()
db.open('ECS165')
print(db)
g_table = db.get_table('Grades')
q = Query(g_table)
print('Merge Start')
q.table.merge(0)
print('Merge End')
db.close()
<|reserved_special_token_1|>
from template.db import Database
from template.query import Query
import os
'''
READ ME!!
Before using this demo, be sure that the Tail_Const is set to a value high enough
    to guarantee that all updates are contained within the same block.
config.py -> TAIL_CONST = 4
This program is meant to run sequentially through all parts starting with an empty ECS165
directory.
'''
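# The Tail_Const setting referenced above is assumed to live in the
# template package's config module, exactly as quoted in the note:
#
#   TAIL_CONST = 4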
db = Database()
db.open("ECS165")
print(db)
g_table = db.get_table('Grades')
q = Query(g_table)
print("Merge Start")
q.table.merge(0)
print("Merge End")
db.close()
|
flexible
|
{
"blob_id": "8f5b7711d913c7375d6816dd94731f1ce5ca1a62",
"index": 8289,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.open('ECS165')\nprint(db)\n<mask token>\nprint('Merge Start')\nq.table.merge(0)\nprint('Merge End')\ndb.close()\n",
"step-3": "<mask token>\ndb = Database()\ndb.open('ECS165')\nprint(db)\ng_table = db.get_table('Grades')\nq = Query(g_table)\nprint('Merge Start')\nq.table.merge(0)\nprint('Merge End')\ndb.close()\n",
"step-4": "from template.db import Database\nfrom template.query import Query\nimport os\n<mask token>\ndb = Database()\ndb.open('ECS165')\nprint(db)\ng_table = db.get_table('Grades')\nq = Query(g_table)\nprint('Merge Start')\nq.table.merge(0)\nprint('Merge End')\ndb.close()\n",
"step-5": "from template.db import Database\r\nfrom template.query import Query\r\nimport os\r\n\r\n'''\r\nREAD ME!!\r\n Before using this demo, be sure that the Tail_Const is set to a value high enough\r\n to guaranteed that all updates are contained within the same block.\r\n config.py -> TAIL_CONST = 4\r\n\r\n This program is meant to run sequentially through all parts starting with an empty ECS165\r\n directory.\r\n'''\r\ndb = Database()\r\ndb.open(\"ECS165\")\r\nprint(db)\r\ng_table = db.get_table('Grades')\r\nq = Query(g_table)\r\n\r\nprint(\"Merge Start\")\r\nq.table.merge(0)\r\nprint(\"Merge End\")\r\n\r\ndb.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import print_function
import re
import sys
from pyspark import SparkContext
# define a regular expression for delimiters
NON_WORDS_DELIMITER = re.compile(r'[^\w\d]+')
def main():
if len(sys.argv) < 2:
print('''Usage: pyspark q2.py <file>
e.g. pyspark q2.py file:///home/cloudera/test_file''')
exit(-1)
sc = SparkContext(appName="HW4_Q2_LC")
try:
n = sc.textFile(sys.argv[1]) \
.filter(lambda x: len(NON_WORDS_DELIMITER.split(x)) > 10).count()
print("=" * 20)
print(" R E S U L T S ")
print("Lines with more than 10 words:", n)
print("=" * 20)
finally:
sc.stop()
if __name__ == '__main__':
main()
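# Example invocation, assuming the local test file from the usage string:
#
#   spark-submit q2.py file:///home/cloudera/test_file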
|
normal
|
{
"blob_id": "deff4eb3ae933a99036f39213ceaf2144b682904",
"index": 5025,
"step-1": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nNON_WORDS_DELIMITER = re.compile('[^\\\\w\\\\d]+')\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import print_function\nimport re\nimport sys\nfrom pyspark import SparkContext\nNON_WORDS_DELIMITER = re.compile('[^\\\\w\\\\d]+')\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\n 'Usage: pyspark q2.py <file>\\n e.g. pyspark q2.py file:///home/cloudera/test_file'\n )\n exit(-1)\n sc = SparkContext(appName='HW4_Q2_LC')\n try:\n n = sc.textFile(sys.argv[1]).filter(lambda x: len(\n NON_WORDS_DELIMITER.split(x)) > 10).count()\n print('=' * 20)\n print(' R E S U L T S ')\n print('Lines with more than 10 words:', n)\n print('=' * 20)\n finally:\n sc.stop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from __future__ import print_function\n\nimport re\nimport sys\nfrom pyspark import SparkContext\n\n\n# define a regular expression for delimiters\nNON_WORDS_DELIMITER = re.compile(r'[^\\w\\d]+')\n\n\ndef main():\n if len(sys.argv) < 2:\n print('''Usage: pyspark q2.py <file>\n e.g. pyspark q2.py file:///home/cloudera/test_file''')\n exit(-1)\n\n sc = SparkContext(appName=\"HW4_Q2_LC\")\n try:\n n = sc.textFile(sys.argv[1]) \\\n .filter(lambda x: len(NON_WORDS_DELIMITER.split(x)) > 10).count()\n print(\"=\" * 20)\n print(\" R E S U L T S \")\n print(\"Lines with more than 10 words:\", n)\n print(\"=\" * 20)\n finally:\n sc.stop()\n \n\nif __name__ == '__main__':\n main()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
"""
Instructions
press Enter to begin.
Afterwards press Enter to "click" the stopwatch.
Press Ctrl-C to quit"""
)
input()
print('Started')
<|reserved_special_token_0|>
try:
while True:
input()
lapTime = round(time.time() - lastTime, 2)
totalTime = round(time.time() - startTime, 2)
print(f'Lap #{lapNum}: {totalTime} {lapTime}', end='')
lapNum += 1
lastTime = time.time()
except KeyboardInterrupt:
print('\nDone')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
"""
Instructions
press Enter to begin.
Afterwards press Enter to "click" the stopwatch.
Press Ctrl-C to quit"""
)
input()
print('Started')
startTime = time.time()
lastTime = startTime
lapNum = 1
try:
while True:
input()
lapTime = round(time.time() - lastTime, 2)
totalTime = round(time.time() - startTime, 2)
print(f'Lap #{lapNum}: {totalTime} {lapTime}', end='')
lapNum += 1
lastTime = time.time()
except KeyboardInterrupt:
print('\nDone')
<|reserved_special_token_1|>
import time
print(
"""
Instructions
press Enter to begin.
Afterwards press Enter to "click" the stopwatch.
Press Ctrl-C to quit"""
)
input()
print('Started')
startTime = time.time()
lastTime = startTime
lapNum = 1
try:
while True:
input()
lapTime = round(time.time() - lastTime, 2)
totalTime = round(time.time() - startTime, 2)
print(f'Lap #{lapNum}: {totalTime} {lapTime}', end='')
lapNum += 1
lastTime = time.time()
except KeyboardInterrupt:
print('\nDone')
<|reserved_special_token_1|>
# stopwatch.py - A simple stopwatch program.
import time
# Display the program's instructions
print(
""" \n\nInstructions\n
press Enter to begin.\n
Afterwards press Enter to "click" the stopwatch.\n
Press Ctrl-C to quit"""
)
input() # press Enter to begin
print("Started")
startTime = time.time()
lastTime = startTime
lapNum = 1
# TODO: start tracking the lap times.
try:
while True:
input()
lapTime = round(time.time() - lastTime, 2)
totalTime = round(time.time() - startTime, 2)
print(f"Lap #{lapNum}: {totalTime} {lapTime}", end="")
lapNum += 1
lastTime = time.time() # reset the last lap time
except KeyboardInterrupt:
# handle the ctrl-c exception to keep its message from displaying.
print("\nDone")
|
flexible
|
{
"blob_id": "cc87682d4ebb283e2d0ef7c09ad28ba708c904bd",
"index": 4407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\" \n\nInstructions\n\npress Enter to begin.\n\nAfterwards press Enter to \"click\" the stopwatch.\n\nPress Ctrl-C to quit\"\"\"\n )\ninput()\nprint('Started')\n<mask token>\ntry:\n while True:\n input()\n lapTime = round(time.time() - lastTime, 2)\n totalTime = round(time.time() - startTime, 2)\n print(f'Lap #{lapNum}: {totalTime} {lapTime}', end='')\n lapNum += 1\n lastTime = time.time()\nexcept KeyboardInterrupt:\n print('\\nDone')\n",
"step-3": "<mask token>\nprint(\n \"\"\" \n\nInstructions\n\npress Enter to begin.\n\nAfterwards press Enter to \"click\" the stopwatch.\n\nPress Ctrl-C to quit\"\"\"\n )\ninput()\nprint('Started')\nstartTime = time.time()\nlastTime = startTime\nlapNum = 1\ntry:\n while True:\n input()\n lapTime = round(time.time() - lastTime, 2)\n totalTime = round(time.time() - startTime, 2)\n print(f'Lap #{lapNum}: {totalTime} {lapTime}', end='')\n lapNum += 1\n lastTime = time.time()\nexcept KeyboardInterrupt:\n print('\\nDone')\n",
"step-4": "import time\nprint(\n \"\"\" \n\nInstructions\n\npress Enter to begin.\n\nAfterwards press Enter to \"click\" the stopwatch.\n\nPress Ctrl-C to quit\"\"\"\n )\ninput()\nprint('Started')\nstartTime = time.time()\nlastTime = startTime\nlapNum = 1\ntry:\n while True:\n input()\n lapTime = round(time.time() - lastTime, 2)\n totalTime = round(time.time() - startTime, 2)\n print(f'Lap #{lapNum}: {totalTime} {lapTime}', end='')\n lapNum += 1\n lastTime = time.time()\nexcept KeyboardInterrupt:\n print('\\nDone')\n",
"step-5": "# stopwatch.py - A simple stopwatch program.\n\nimport time\n\n# Display the porgram's instructions\n\nprint(\n \"\"\" \\n\\nInstructions\\n\npress Enter to begin.\\n\nAfterwards press Enter to \"click\" the stopwatch.\\n\nPress Ctrl-C to quit\"\"\"\n)\ninput() # press Enter to begin\nprint(\"Started\")\nstartTime = time.time()\nlastTime = startTime\nlapNum = 1\n\n# TODO: start tracking the lap times.\ntry:\n while True:\n input()\n lapTime = round(time.time() - lastTime, 2)\n totalTime = round(time.time() - startTime, 2)\n print(f\"Lap #{lapNum}: {totalTime} {lapTime}\", end=\"\")\n lapNum += 1\n lastTime = time.time() # reset the last lap time\n\nexcept KeyboardInterrupt:\n # handle the ctrl-c exception to keep its message from displaying.\n print(\"\\nDone\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pw_validator(pw):
pw = list(pw)
if len(pw) < 6 or len(pw) > 16:
return 'Please enter a valid password.'
num_count = 0
lower_count = 0
upper_count = 0
spec_count = 0
for i in pw:
if i in '0123456789':
idx = pw.index(i)
pw[idx] = int(i)
num_count += 1
if i in 'abcdefghijklmnopqrstuvwxyz':
idx = pw.index(i)
lower_count += 1
if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
idx = pw.index(i)
upper_count += 1
if i in '~!@#$%&*':
idx = pw.index(i)
spec_count += 1
if (num_count == 0 or lower_count == 0 or upper_count == 0 or
spec_count == 0):
return 'Please enter a valid password.'
else:
return 'Success!'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pw_validator(pw):
pw = list(pw)
if len(pw) < 6 or len(pw) > 16:
return 'Please enter a valid password.'
num_count = 0
lower_count = 0
upper_count = 0
spec_count = 0
for i in pw:
if i in '0123456789':
idx = pw.index(i)
pw[idx] = int(i)
num_count += 1
if i in 'abcdefghijklmnopqrstuvwxyz':
idx = pw.index(i)
lower_count += 1
if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
idx = pw.index(i)
upper_count += 1
if i in '~!@#$%&*':
idx = pw.index(i)
spec_count += 1
if (num_count == 0 or lower_count == 0 or upper_count == 0 or
spec_count == 0):
return 'Please enter a valid password.'
else:
return 'Success!'
<|reserved_special_token_0|>
print(f'abc: {a}')
<|reserved_special_token_0|>
print(f'1234567890abcdefg: {b}')
<|reserved_special_token_0|>
print(f'@bcdEFGh!j: {c}')
<|reserved_special_token_0|>
print(f'@BCD3EFGH!J: {d}')
<|reserved_special_token_0|>
print(f'@bcd3efgh!j: {e}')
<|reserved_special_token_0|>
print(f'Abcd3FGhIj112: {f}')
<|reserved_special_token_0|>
print(f'P$kj35S&7: {g}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pw_validator(pw):
pw = list(pw)
if len(pw) < 6 or len(pw) > 16:
return 'Please enter a valid password.'
num_count = 0
lower_count = 0
upper_count = 0
spec_count = 0
for i in pw:
if i in '0123456789':
idx = pw.index(i)
pw[idx] = int(i)
num_count += 1
if i in 'abcdefghijklmnopqrstuvwxyz':
idx = pw.index(i)
lower_count += 1
if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
idx = pw.index(i)
upper_count += 1
if i in '~!@#$%&*':
idx = pw.index(i)
spec_count += 1
if (num_count == 0 or lower_count == 0 or upper_count == 0 or
spec_count == 0):
return 'Please enter a valid password.'
else:
return 'Success!'
a = pw_validator('abc')
print(f'abc: {a}')
b = pw_validator('1234567890abcdefg')
print(f'1234567890abcdefg: {b}')
c = pw_validator('@bcdEFGh!j')
print(f'@bcdEFGh!j: {c}')
d = pw_validator('@BCD3EFGH!J')
print(f'@BCD3EFGH!J: {d}')
e = pw_validator('@bcd3efgh!j')
print(f'@bcd3efgh!j: {e}')
f = pw_validator('Abcd3FGhIj112')
print(f'Abcd3FGhIj112: {f}')
g = pw_validator('P$kj35S&7')
print(f'P$kj35S&7: {g}')
<|reserved_special_token_1|>
"""
Password Requirements
"""
# Write a Python program called "pw_validator" to validate a password based on the security requirements outlined below.
# VALIDATION REQUIREMENTS:
## At least 1 lowercase letter [a-z]
## At least 1 uppercase letter [A-Z].
## At least 1 number [0-9].
## At least 1 special character [~!@#$%&*].
## Min length 6 characters.
## Max length 16 characters.
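# A compact regex-based equivalent of the checks below, shown only as an
# illustrative sketch (the program itself uses the explicit loop):
#
#   import re
#   def pw_validator_re(pw):
#       ok = (6 <= len(pw) <= 16 and re.search(r'[a-z]', pw)
#             and re.search(r'[A-Z]', pw) and re.search(r'[0-9]', pw)
#             and re.search(r'[~!@#$%&*]', pw))
#       return 'Success!' if ok else 'Please enter a valid password.'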
def pw_validator(pw):
pw = list(pw)
if len(pw) < 6 or len(pw) > 16:
return 'Please enter a valid password.'
num_count = 0
lower_count = 0
upper_count = 0
spec_count = 0
for i in pw:
# check numbers
if i in '0123456789':
idx = pw.index(i)
pw[idx] = int(i)
num_count += 1
# check lowercase letters
if i in 'abcdefghijklmnopqrstuvwxyz':
idx = pw.index(i)
lower_count += 1
# check uppercase letters
if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
idx = pw.index(i)
upper_count += 1
# check special char
if i in '~!@#$%&*':
idx = pw.index(i)
spec_count += 1
if num_count == 0 or lower_count == 0 or upper_count == 0 or spec_count == 0:
return 'Please enter a valid password.'
else:
return 'Success!'
# < 6 char
a = pw_validator('abc')
print(f'abc: {a}')
# > 16 char
b = pw_validator('1234567890abcdefg')
print(f'1234567890abcdefg: {b}')
# no numbers
c = pw_validator('@bcdEFGh!j')
print(f'@bcdEFGh!j: {c}')
# no lowercase letters
d = pw_validator('@BCD3EFGH!J')
print(f'@BCD3EFGH!J: {d}')
# no uppercase letters
e = pw_validator('@bcd3efgh!j')
print(f'@bcd3efgh!j: {e}')
# no special characters
f = pw_validator('Abcd3FGhIj112')
print(f'Abcd3FGhIj112: {f}')
# valid pw
g = pw_validator('P$kj35S&7')
print(f'P$kj35S&7: {g}')
|
flexible
|
{
"blob_id": "d72f9d521613accfd93e6de25a71d188626a0952",
"index": 4807,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef pw_validator(pw):\n pw = list(pw)\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n for i in pw:\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n if (num_count == 0 or lower_count == 0 or upper_count == 0 or \n spec_count == 0):\n return 'Please enter a valid password.'\n else:\n return 'Success!'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef pw_validator(pw):\n pw = list(pw)\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n for i in pw:\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n if (num_count == 0 or lower_count == 0 or upper_count == 0 or \n spec_count == 0):\n return 'Please enter a valid password.'\n else:\n return 'Success!'\n\n\n<mask token>\nprint(f'abc: {a}')\n<mask token>\nprint(f'1234567890abcdefg: {b}')\n<mask token>\nprint(f'@bcdEFGh!j: {c}')\n<mask token>\nprint(f'@BCD3EFGH!J: {d}')\n<mask token>\nprint(f'@bcd3efgh!j: {e}')\n<mask token>\nprint(f'Abcd3FGhIj112: {f}')\n<mask token>\nprint(f'P$kj35S&7: {g}')\n",
"step-4": "<mask token>\n\n\ndef pw_validator(pw):\n pw = list(pw)\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n for i in pw:\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n if (num_count == 0 or lower_count == 0 or upper_count == 0 or \n spec_count == 0):\n return 'Please enter a valid password.'\n else:\n return 'Success!'\n\n\na = pw_validator('abc')\nprint(f'abc: {a}')\nb = pw_validator('1234567890abcdefg')\nprint(f'1234567890abcdefg: {b}')\nc = pw_validator('@bcdEFGh!j')\nprint(f'@bcdEFGh!j: {c}')\nd = pw_validator('@BCD3EFGH!J')\nprint(f'@BCD3EFGH!J: {d}')\ne = pw_validator('@bcd3efgh!j')\nprint(f'@bcd3efgh!j: {e}')\nf = pw_validator('Abcd3FGhIj112')\nprint(f'Abcd3FGhIj112: {f}')\ng = pw_validator('P$kj35S&7')\nprint(f'P$kj35S&7: {g}')\n",
"step-5": "\"\"\"\nPassword Requirements\n\"\"\"\n\n# Write a Python program called \"pw_validator\" to validate a password based on the security requirements outlined below.\n\n# VALIDATION REQUIREMENTS:\n## At least 1 lowercase letter [a-z]\n## At least 1 uppercase letter [A-Z].\n## At least 1 number [0-9].\n## At least 1 special character [~!@#$%&*].\n## Min length 6 characters.\n## Max length 16 characters.\n\ndef pw_validator(pw):\n pw = list(pw)\n\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n\n for i in pw:\n # check numbers\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n # check lowercase letters\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n # check uppercase letters\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n # check special char\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n\n if num_count == 0 or lower_count == 0 or upper_count == 0 or spec_count == 0:\n return 'Please enter a valid password.'\n else: \n return 'Success!'\n\n# < 6 char\na = pw_validator('abc')\nprint(f'abc: {a}')\n\n# > 16 char\nb = pw_validator('1234567890abcdefg')\nprint(f'1234567890abcdefg: {b}')\n\n# no numbers\nc = pw_validator('@bcdEFGh!j')\nprint(f'@bcdEFGh!j: {c}')\n\n# no lowercase letters\nd = pw_validator('@BCD3EFGH!J')\nprint(f'@BCD3EFGH!J: {d}')\n\n# no uppercase letters\ne = pw_validator('@bcd3efgh!j')\nprint(f'@bcd3efgh!j: {e}')\n\n# no special characters\nf = pw_validator('Abcd3FGhIj112')\nprint(f'Abcd3FGhIj112: {f}')\n\n# valid pw\ng = pw_validator('P$kj35S&7')\nprint(f'P$kj35S&7: {g}')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def base_url():
default_request = {'base_url': url, 'headers': {'Content-Type':
'application/json;charset=UTF-8'}}
return default_request['base_url']
def random_Num(length, string=[]):
for i in range(length):
y = str(random.randint(0, 9))
string.append(y)
string = ''.join(string)
return string
<|reserved_special_token_0|>
def returnId():
return a[0]
def returnTimestamp():
return a[1]
def query_mysql(sql, *params, database='zbcf_injury_test'):
conn = pymysql.connect(host=
'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',
password='testuser@2018', database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
cursor.execute(sql, params)
data = cursor.fetchone()
cursor.close()
conn.close()
return data
<|reserved_special_token_0|>
def sleep(num):
time.sleep(num)
def queryLoginNum(orderId):
sql = (
"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'"
)
query_result = query_mysql(sql, orderId)
print(orderId)
return query_result['token']
def opetationId_queryLoginNum(operation_Id):
sql = (
"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'"
)
query_result = query_mysql(sql, operation_Id)
return query_result['token']
<|reserved_special_token_0|>
def queryOperationId():
pass
<|reserved_special_token_0|>
def getH5Token(accessCode):
url = 'http://testrsapp.cias.cn/injury/user/h5/login'
headers = {'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8'}
data = {'accessCode': accessCode}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def base_url():
default_request = {'base_url': url, 'headers': {'Content-Type':
'application/json;charset=UTF-8'}}
return default_request['base_url']
def random_Num(length, string=[]):
for i in range(length):
y = str(random.randint(0, 9))
string.append(y)
string = ''.join(string)
return string
<|reserved_special_token_0|>
def returnId():
return a[0]
def returnTimestamp():
return a[1]
def query_mysql(sql, *params, database='zbcf_injury_test'):
conn = pymysql.connect(host=
'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',
password='testuser@2018', database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
cursor.execute(sql, params)
data = cursor.fetchone()
cursor.close()
conn.close()
return data
def orderTimeOut(order_id, database='zbcf_injury_test'):
conn = pymysql.connect(host=
'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',
password='testuser@2018', database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
sql = (
"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')"
% (now, order_id))
effectRows = cursor.execute(sql)
conn.commit()
print('make order time out!')
cursor.close()
conn.close()
return effectRows
def sleep(num):
time.sleep(num)
def queryLoginNum(orderId):
sql = (
"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'"
)
query_result = query_mysql(sql, orderId)
print(orderId)
return query_result['token']
def opetationId_queryLoginNum(operation_Id):
sql = (
"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'"
)
query_result = query_mysql(sql, operation_Id)
return query_result['token']
def queryOrderId():
pass
def queryOperationId():
pass
<|reserved_special_token_0|>
def getH5Token(accessCode):
url = 'http://testrsapp.cias.cn/injury/user/h5/login'
headers = {'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8'}
data = {'accessCode': accessCode}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
def getWebToken(accessCode):
url = 'http://testrsapp.cias.cn/injury/user/pc/login'
headers = {'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8'}
data = {'loginName': 'haadmin003', 'loginPass': 'Y2lhczEyMzQ1Ng==',
'verifyCode': 'tubd'}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def base_url():
default_request = {'base_url': url, 'headers': {'Content-Type':
'application/json;charset=UTF-8'}}
return default_request['base_url']
def random_Num(length, string=[]):
for i in range(length):
y = str(random.randint(0, 9))
string.append(y)
string = ''.join(string)
return string
def generator():
districtcode = DistrictList[random.randint(0, len(DistrictList) - 1)][
'code']
date = datetime.datetime.now() - datetime.timedelta(weeks=random.
randint(1, 2350))
birthDay = date.strftime('%Y%m%d')
randomNum = str(random.randint(100, 300))
idnum = districtcode + birthDay + randomNum
i = 0
count = 0
weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
checkcode = {'0': '1', '1': '0', '2': 'X', '3': '9', '4': '8', '5': '7',
        '6': '6', '7': '5', '8': '4', '9': '3', '10': '2'}
for i in range(0, len(idnum)):
count = count + int(idnum[i]) * weight[i]
id = idnum + checkcode[str(count % 11)]
timstamp = int(time.mktime(date.timetuple()) * 1000)
return id, timstamp
<|reserved_special_token_0|>
def returnId():
return a[0]
def returnTimestamp():
return a[1]
def query_mysql(sql, *params, database='zbcf_injury_test'):
conn = pymysql.connect(host=
'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',
password='testuser@2018', database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
cursor.execute(sql, params)
data = cursor.fetchone()
cursor.close()
conn.close()
return data
def orderTimeOut(order_id, database='zbcf_injury_test'):
conn = pymysql.connect(host=
'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',
password='testuser@2018', database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
sql = (
"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')"
% (now, order_id))
effectRows = cursor.execute(sql)
conn.commit()
print('make order time out!')
cursor.close()
conn.close()
return effectRows
def sleep(num):
time.sleep(num)
def queryLoginNum(orderId):
sql = (
"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'"
)
query_result = query_mysql(sql, orderId)
print(orderId)
return query_result['token']
def opetationId_queryLoginNum(operation_Id):
sql = (
"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'"
)
query_result = query_mysql(sql, operation_Id)
return query_result['token']
def queryOrderId():
pass
def queryOperationId():
pass
def queryOrderStatus():
pass
def getH5Token(accessCode):
url = 'http://testrsapp.cias.cn/injury/user/h5/login'
headers = {'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8'}
data = {'accessCode': accessCode}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
def getWebToken(accessCode):
url = 'http://testrsapp.cias.cn/injury/user/pc/login'
headers = {'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8'}
data = {'loginName': 'haadmin003', 'loginPass': 'Y2lhczEyMzQ1Ng==',
'verifyCode': 'tubd'}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
<|reserved_special_token_1|>
import datetime
import time
import requests
from config import url
from data import DistrictList
import random
import pymysql
def base_url():
default_request = {'base_url': url, 'headers': {'Content-Type':
'application/json;charset=UTF-8'}}
return default_request['base_url']
def random_Num(length, string=[]):
for i in range(length):
y = str(random.randint(0, 9))
string.append(y)
string = ''.join(string)
return string
def generator():
districtcode = DistrictList[random.randint(0, len(DistrictList) - 1)][
'code']
date = datetime.datetime.now() - datetime.timedelta(weeks=random.
randint(1, 2350))
birthDay = date.strftime('%Y%m%d')
randomNum = str(random.randint(100, 300))
idnum = districtcode + birthDay + randomNum
i = 0
count = 0
weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
checkcode = {'0': '1', '1': '0', '2': 'X', '3': '9', '4': '8', '5': '7',
        '6': '6', '7': '5', '8': '4', '9': '3', '10': '2'}
for i in range(0, len(idnum)):
count = count + int(idnum[i]) * weight[i]
id = idnum + checkcode[str(count % 11)]
timstamp = int(time.mktime(date.timetuple()) * 1000)
return id, timstamp
a = generator()
def returnId():
return a[0]
def returnTimestamp():
return a[1]
def query_mysql(sql, *params, database='zbcf_injury_test'):
conn = pymysql.connect(host=
'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',
password='testuser@2018', database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
cursor.execute(sql, params)
data = cursor.fetchone()
cursor.close()
conn.close()
return data
def orderTimeOut(order_id, database='zbcf_injury_test'):
conn = pymysql.connect(host=
'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',
password='testuser@2018', database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
sql = (
"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')"
% (now, order_id))
effectRows = cursor.execute(sql)
conn.commit()
print('make order time out!')
cursor.close()
conn.close()
return effectRows
def sleep(num):
time.sleep(num)
def queryLoginNum(orderId):
sql = (
"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'"
)
query_result = query_mysql(sql, orderId)
print(orderId)
return query_result['token']
def opetationId_queryLoginNum(operation_Id):
sql = (
"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'"
)
query_result = query_mysql(sql, operation_Id)
return query_result['token']
def queryOrderId():
pass
def queryOperationId():
pass
def queryOrderStatus():
pass
def getH5Token(accessCode):
url = 'http://testrsapp.cias.cn/injury/user/h5/login'
headers = {'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8'}
data = {'accessCode': accessCode}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
def getWebToken(accessCode):
url = 'http://testrsapp.cias.cn/injury/user/pc/login'
headers = {'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8'}
data = {'loginName': 'haadmin003', 'loginPass': 'Y2lhczEyMzQ1Ng==',
'verifyCode': 'tubd'}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
<|reserved_special_token_1|>
import datetime
import time
import requests
from config import url
from data import DistrictList
import random
import pymysql
def base_url():
default_request = {
'base_url': url,
'headers': {
"Content-Type": "application/json;charset=UTF-8"}
}
return default_request['base_url']
# Generate a random numeric string of a given length
def random_Num(length, string=[]):
for i in range(length):
y = str(random.randint(0, 9))
string.append(y)
string = ''.join(string)
return string
# a = random_Num(9, ['1','3'])
# b = random_Num(6, ['粤','B'])
# c = random_Num(9)
# print(a,b,c)
# Generate an ID number together with the matching birthday
def generator():
    # Build the ID number
districtcode = DistrictList[random.randint(0, len(DistrictList) - 1)]['code']
# date = datetime.date.today() - datetime.timedelta(weeks=random.randint(1, 3840))
date = datetime.datetime.now() - datetime.timedelta(weeks=random.randint(1, 2350))
birthDay = date.strftime('%Y%m%d')
randomNum = str(random.randint(100, 300))
idnum = districtcode + birthDay + randomNum
i = 0
count = 0
weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
    checkcode = {'0': '1', '1': '0', '2': 'X', '3': '9', '4': '8', '5': '7', '6': '6', '7': '5', '8': '4', '9': '3', '10': '2'}
for i in range(0, len(idnum)):
count = count + int(idnum[i]) * weight[i]
id = idnum + checkcode[str(count%11)]
    # Build the birthday timestamp
# timstamp = date.strftime('%Y%m%d%H%M%S')
# timstamp = datetime.datetime.strptime(date, '%Y%m%d%H%M%S').timestamp()
timstamp = int(time.mktime(date.timetuple()) * 1000)
return id, timstamp
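# The weight/checkcode tables above implement the GB 11643 (ISO 7064
# MOD 11-2) check digit for 18-digit ID numbers; a verification sketch,
# illustrative only and unused elsewhere in this module:
#
#   def is_valid_id(id18):
#       weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
#       s = sum(int(d) * w for d, w in zip(id18[:17], weight))
#       return '10X98765432'[s % 11] == id18[-1]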
a = generator()
def returnId():
return a[0]
def returnTimestamp():
return a[1]
# Shared helper for connecting to and querying the database
def query_mysql(sql, *params, database="zbcf_injury_test"):
conn = pymysql.connect(host="rm-wz97oujls3998784i.mysql.rds.aliyuncs.com", user="testuser",
password="testuser@2018", database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
cursor.execute(sql, params)
data = cursor.fetchone()
cursor.close()
conn.close()
return data
# Simulate an order running past the 48h/12h/7-day limit
# by simply setting the expiry time in the database to now
# def orderTimeOut(operation_id):
# now = datetime.datetime.now()
# now = now.strftime('%Y-%m-%d %H:%M:%S')
# # delta = datetime.timedelta(days=outTime)
# # now = now + delta
# print(now)
# # sql = "UPDATE t_auth_info t SET end_effect_time = str_to_date(\'%s\','%%Y-%%m-%%d %%H:%%i:%%s') WHERE t.operation_id = '%s'"
# sql = "UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = '%s'"
# params = [now, operation_id]
# update_result = query_mysql(sql, *params)
# return update_result
# # return now.strftime('%Y-%m-%d %H:%M:%S')
# # What is going wrong here??
# a = orderTimeOut(289)
# print(a)
# Simulate an order running past the 48h/12h/7-day limit
# by simply setting the expiry time in the database to now
def orderTimeOut(order_id, database="zbcf_injury_test"):
conn = pymysql.connect(host="rm-wz97oujls3998784i.mysql.rds.aliyuncs.com", user="testuser",
password="testuser@2018", database=database, charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
sql = "UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')" % (now, order_id)
effectRows = cursor.execute(sql)
conn.commit()
print('make order time out!')
cursor.close()
conn.close()
return effectRows
# a = orderTimeOut(260)
# print(a)
def sleep(num):
time.sleep(num)
# Look up the login code by order_Id
def queryLoginNum(orderId):
sql = "SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'"
query_result = query_mysql(sql, orderId)
print(orderId)
return query_result['token']
# a = queryLoginNum(418)
# print (a)
# Look up the login code by operation_Id
def opetationId_queryLoginNum(operation_Id):
sql = "SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'"
query_result = query_mysql(sql, operation_Id)
return query_result['token']
# a = queryLoginNum(290)
# print (a)
# Helper that returns the orderId
def queryOrderId():
pass
# Helper that returns the operationId
def queryOperationId():
pass
# Query the order status
def queryOrderStatus():
pass
# # Log in on the PC side to get a token and splice it into the headers
# def setup_hook_token(request):
# #print(request)
# url_path="http://testrenshang.cias.cn/injury/user/pc/login"
# header={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
# payload={"loginName": "haadmin003", "loginPass": "Y2lhczEyMzQ1Ng==", "verifyCode": "tubd"}
# req=requests.post(url=url_path, headers=header, params=payload).json()
# token=req['data']['token']
# request["headers"]['token']=token
# # print(token,'\n', req)
# # print(request)
# # request = {'headers':{'Content-Type': 'application/json;charset=UTF-8', 'method': 'GET', 'url': '$uri', 'token': '$token'}}
# # setup_hook_token(request)
# Log in on the H5 side to get a token
def getH5Token(accessCode):
url = "http://testrsapp.cias.cn/injury/user/h5/login"
headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
data = {"accessCode": accessCode}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
# a = getH5Token('31583310')
# print(a)
# Log in on the Web side to get a token
def getWebToken(accessCode):
url = "http://testrsapp.cias.cn/injury/user/pc/login"
headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
data = {"loginName": "haadmin003", "loginPass": "Y2lhczEyMzQ1Ng==", "verifyCode": "tubd"}
req = requests.post(url=url, headers=headers, data=data).json()
return req['data']['token']
# a = getWebToken('31583310')
# print(a)
#
|
flexible
|
{
"blob_id": "c55b6fed92a5f4f2961c6f8d5b150b22a5f622e8",
"index": 4520,
"step-1": "<mask token>\n\n\ndef base_url():\n default_request = {'base_url': url, 'headers': {'Content-Type':\n 'application/json;charset=UTF-8'}}\n return default_request['base_url']\n\n\ndef random_Num(length, string=[]):\n for i in range(length):\n y = str(random.randint(0, 9))\n string.append(y)\n string = ''.join(string)\n return string\n\n\n<mask token>\n\n\ndef returnId():\n return a[0]\n\n\ndef returnTimestamp():\n return a[1]\n\n\ndef query_mysql(sql, *params, database='zbcf_injury_test'):\n conn = pymysql.connect(host=\n 'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',\n password='testuser@2018', database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n cursor.execute(sql, params)\n data = cursor.fetchone()\n cursor.close()\n conn.close()\n return data\n\n\n<mask token>\n\n\ndef sleep(num):\n time.sleep(num)\n\n\ndef queryLoginNum(orderId):\n sql = (\n \"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'\"\n )\n query_result = query_mysql(sql, orderId)\n print(orderId)\n return query_result['token']\n\n\ndef opetationId_queryLoginNum(operation_Id):\n sql = (\n \"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'\"\n )\n query_result = query_mysql(sql, operation_Id)\n return query_result['token']\n\n\n<mask token>\n\n\ndef queryOperationId():\n pass\n\n\n<mask token>\n\n\ndef getH5Token(accessCode):\n url = 'http://testrsapp.cias.cn/injury/user/h5/login'\n headers = {'Content-Type':\n 'application/x-www-form-urlencoded; charset=UTF-8'}\n data = {'accessCode': accessCode}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef base_url():\n default_request = {'base_url': url, 'headers': {'Content-Type':\n 'application/json;charset=UTF-8'}}\n return default_request['base_url']\n\n\ndef random_Num(length, string=[]):\n for i in range(length):\n y = str(random.randint(0, 9))\n string.append(y)\n string = ''.join(string)\n return string\n\n\n<mask token>\n\n\ndef returnId():\n return a[0]\n\n\ndef returnTimestamp():\n return a[1]\n\n\ndef query_mysql(sql, *params, database='zbcf_injury_test'):\n conn = pymysql.connect(host=\n 'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',\n password='testuser@2018', database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n cursor.execute(sql, params)\n data = cursor.fetchone()\n cursor.close()\n conn.close()\n return data\n\n\ndef orderTimeOut(order_id, database='zbcf_injury_test'):\n conn = pymysql.connect(host=\n 'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',\n password='testuser@2018', database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n now = datetime.datetime.now()\n now = now.strftime('%Y-%m-%d %H:%M:%S')\n sql = (\n \"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')\"\n % (now, order_id))\n effectRows = cursor.execute(sql)\n conn.commit()\n print('make order time out!')\n cursor.close()\n conn.close()\n return effectRows\n\n\ndef sleep(num):\n time.sleep(num)\n\n\ndef queryLoginNum(orderId):\n sql = (\n \"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'\"\n )\n query_result = query_mysql(sql, orderId)\n print(orderId)\n return query_result['token']\n\n\ndef opetationId_queryLoginNum(operation_Id):\n sql = (\n \"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'\"\n )\n query_result = query_mysql(sql, operation_Id)\n return query_result['token']\n\n\ndef queryOrderId():\n pass\n\n\ndef queryOperationId():\n pass\n\n\n<mask token>\n\n\ndef getH5Token(accessCode):\n url = 'http://testrsapp.cias.cn/injury/user/h5/login'\n headers = {'Content-Type':\n 'application/x-www-form-urlencoded; charset=UTF-8'}\n data = {'accessCode': accessCode}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n\n\ndef getWebToken(accessCode):\n url = 'http://testrsapp.cias.cn/injury/user/pc/login'\n headers = {'Content-Type':\n 'application/x-www-form-urlencoded; charset=UTF-8'}\n data = {'loginName': 'haadmin003', 'loginPass': 'Y2lhczEyMzQ1Ng==',\n 'verifyCode': 'tubd'}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n",
"step-3": "<mask token>\n\n\ndef base_url():\n default_request = {'base_url': url, 'headers': {'Content-Type':\n 'application/json;charset=UTF-8'}}\n return default_request['base_url']\n\n\ndef random_Num(length, string=[]):\n for i in range(length):\n y = str(random.randint(0, 9))\n string.append(y)\n string = ''.join(string)\n return string\n\n\ndef generator():\n districtcode = DistrictList[random.randint(0, len(DistrictList) - 1)][\n 'code']\n date = datetime.datetime.now() - datetime.timedelta(weeks=random.\n randint(1, 2350))\n birthDay = date.strftime('%Y%m%d')\n randomNum = str(random.randint(100, 300))\n idnum = districtcode + birthDay + randomNum\n i = 0\n count = 0\n weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]\n checkcode = {'0': '1', '1': '0', '2': 'X', '3': '9', '4': '8', '5': '7',\n '6': '6', '7': '5', '8': '5', '9': '3', '10': '2'}\n for i in range(0, len(idnum)):\n count = count + int(idnum[i]) * weight[i]\n id = idnum + checkcode[str(count % 11)]\n timstamp = int(time.mktime(date.timetuple()) * 1000)\n return id, timstamp\n\n\n<mask token>\n\n\ndef returnId():\n return a[0]\n\n\ndef returnTimestamp():\n return a[1]\n\n\ndef query_mysql(sql, *params, database='zbcf_injury_test'):\n conn = pymysql.connect(host=\n 'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',\n password='testuser@2018', database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n cursor.execute(sql, params)\n data = cursor.fetchone()\n cursor.close()\n conn.close()\n return data\n\n\ndef orderTimeOut(order_id, database='zbcf_injury_test'):\n conn = pymysql.connect(host=\n 'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',\n password='testuser@2018', database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n now = datetime.datetime.now()\n now = now.strftime('%Y-%m-%d %H:%M:%S')\n sql = (\n \"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')\"\n % (now, order_id))\n effectRows = cursor.execute(sql)\n conn.commit()\n print('make order time out!')\n cursor.close()\n conn.close()\n return effectRows\n\n\ndef sleep(num):\n time.sleep(num)\n\n\ndef queryLoginNum(orderId):\n sql = (\n \"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'\"\n )\n query_result = query_mysql(sql, orderId)\n print(orderId)\n return query_result['token']\n\n\ndef opetationId_queryLoginNum(operation_Id):\n sql = (\n \"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'\"\n )\n query_result = query_mysql(sql, operation_Id)\n return query_result['token']\n\n\ndef queryOrderId():\n pass\n\n\ndef queryOperationId():\n pass\n\n\ndef queryOrderStatus():\n pass\n\n\ndef getH5Token(accessCode):\n url = 'http://testrsapp.cias.cn/injury/user/h5/login'\n headers = {'Content-Type':\n 'application/x-www-form-urlencoded; charset=UTF-8'}\n data = {'accessCode': accessCode}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n\n\ndef getWebToken(accessCode):\n url = 'http://testrsapp.cias.cn/injury/user/pc/login'\n headers = {'Content-Type':\n 'application/x-www-form-urlencoded; charset=UTF-8'}\n data = {'loginName': 'haadmin003', 'loginPass': 'Y2lhczEyMzQ1Ng==',\n 'verifyCode': 'tubd'}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n",
"step-4": "import datetime\nimport time\nimport requests\nfrom config import url\nfrom data import DistrictList\nimport random\nimport pymysql\n\n\ndef base_url():\n default_request = {'base_url': url, 'headers': {'Content-Type':\n 'application/json;charset=UTF-8'}}\n return default_request['base_url']\n\n\ndef random_Num(length, string=[]):\n for i in range(length):\n y = str(random.randint(0, 9))\n string.append(y)\n string = ''.join(string)\n return string\n\n\ndef generator():\n districtcode = DistrictList[random.randint(0, len(DistrictList) - 1)][\n 'code']\n date = datetime.datetime.now() - datetime.timedelta(weeks=random.\n randint(1, 2350))\n birthDay = date.strftime('%Y%m%d')\n randomNum = str(random.randint(100, 300))\n idnum = districtcode + birthDay + randomNum\n i = 0\n count = 0\n weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]\n checkcode = {'0': '1', '1': '0', '2': 'X', '3': '9', '4': '8', '5': '7',\n '6': '6', '7': '5', '8': '5', '9': '3', '10': '2'}\n for i in range(0, len(idnum)):\n count = count + int(idnum[i]) * weight[i]\n id = idnum + checkcode[str(count % 11)]\n timstamp = int(time.mktime(date.timetuple()) * 1000)\n return id, timstamp\n\n\na = generator()\n\n\ndef returnId():\n return a[0]\n\n\ndef returnTimestamp():\n return a[1]\n\n\ndef query_mysql(sql, *params, database='zbcf_injury_test'):\n conn = pymysql.connect(host=\n 'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',\n password='testuser@2018', database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n cursor.execute(sql, params)\n data = cursor.fetchone()\n cursor.close()\n conn.close()\n return data\n\n\ndef orderTimeOut(order_id, database='zbcf_injury_test'):\n conn = pymysql.connect(host=\n 'rm-wz97oujls3998784i.mysql.rds.aliyuncs.com', user='testuser',\n password='testuser@2018', database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n now = datetime.datetime.now()\n now = now.strftime('%Y-%m-%d %H:%M:%S')\n sql = (\n \"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')\"\n % (now, order_id))\n effectRows = cursor.execute(sql)\n conn.commit()\n print('make order time out!')\n cursor.close()\n conn.close()\n return effectRows\n\n\ndef sleep(num):\n time.sleep(num)\n\n\ndef queryLoginNum(orderId):\n sql = (\n \"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'\"\n )\n query_result = query_mysql(sql, orderId)\n print(orderId)\n return query_result['token']\n\n\ndef opetationId_queryLoginNum(operation_Id):\n sql = (\n \"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'\"\n )\n query_result = query_mysql(sql, operation_Id)\n return query_result['token']\n\n\ndef queryOrderId():\n pass\n\n\ndef queryOperationId():\n pass\n\n\ndef queryOrderStatus():\n pass\n\n\ndef getH5Token(accessCode):\n url = 'http://testrsapp.cias.cn/injury/user/h5/login'\n headers = {'Content-Type':\n 'application/x-www-form-urlencoded; charset=UTF-8'}\n data = {'accessCode': accessCode}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n\n\ndef getWebToken(accessCode):\n url = 'http://testrsapp.cias.cn/injury/user/pc/login'\n headers = {'Content-Type':\n 'application/x-www-form-urlencoded; charset=UTF-8'}\n data = {'loginName': 'haadmin003', 'loginPass': 'Y2lhczEyMzQ1Ng==',\n 'verifyCode': 'tubd'}\n 
req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n",
"step-5": "import datetime\nimport time\n\nimport requests\n\nfrom config import url\nfrom data import DistrictList\nimport random\nimport pymysql\n\ndef base_url():\n default_request = {\n 'base_url': url,\n 'headers': {\n \"Content-Type\": \"application/json;charset=UTF-8\"}\n }\n return default_request['base_url']\n\n\n# 生成一个指定长度的随机数\ndef random_Num(length, string=[]):\n for i in range(length):\n y = str(random.randint(0, 9))\n string.append(y)\n string = ''.join(string)\n return string\n\n# a = random_Num(9, ['1','3'])\n# b = random_Num(6, ['粤','B'])\n# c = random_Num(9)\n# print(a,b,c)\n\n# 生成一个身份证号码,以及对应的生日\ndef generator():\n # 生成身份证号码\n districtcode = DistrictList[random.randint(0, len(DistrictList) - 1)]['code']\n # date = datetime.date.today() - datetime.timedelta(weeks=random.randint(1, 3840))\n date = datetime.datetime.now() - datetime.timedelta(weeks=random.randint(1, 2350))\n birthDay = date.strftime('%Y%m%d')\n randomNum = str(random.randint(100, 300))\n idnum = districtcode + birthDay + randomNum\n i = 0\n count = 0\n weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]\n checkcode = {'0': '1', '1': '0', '2': 'X', '3': '9', '4': '8', '5': '7', '6': '6', '7': '5', '8': '5', '9': '3', '10': '2'}\n for i in range(0, len(idnum)):\n count = count + int(idnum[i]) * weight[i]\n id = idnum + checkcode[str(count%11)]\n # 生成生日时间戳\n # timstamp = date.strftime('%Y%m%d%H%M%S')\n # timstamp = datetime.datetime.strptime(date, '%Y%m%d%H%M%S').timestamp()\n timstamp = int(time.mktime(date.timetuple()) * 1000)\n return id, timstamp\n\na = generator()\n\ndef returnId():\n return a[0]\n\ndef returnTimestamp():\n return a[1]\n\n\n\n# 连接数据库公用方法\ndef query_mysql(sql, *params, database=\"zbcf_injury_test\"):\n conn = pymysql.connect(host=\"rm-wz97oujls3998784i.mysql.rds.aliyuncs.com\", user=\"testuser\",\n password=\"testuser@2018\", database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n cursor.execute(sql, params)\n data = cursor.fetchone()\n cursor.close()\n conn.close()\n return data\n\n\n# 模拟订单超过48/12小时/7天\n# 只需将数据库过期时间设置为当前时间\n# def orderTimeOut(operation_id):\n# now = datetime.datetime.now()\n# now = now.strftime('%Y-%m-%d %H:%M:%S')\n# # delta = datetime.timedelta(days=outTime)\n# # now = now + delta\n# print(now)\n# # sql = \"UPDATE t_auth_info t SET end_effect_time = str_to_date(\\'%s\\','%%Y-%%m-%%d %%H:%%i:%%s') WHERE t.operation_id = '%s'\"\n# sql = \"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = '%s'\"\n# params = [now, operation_id]\n# update_result = query_mysql(sql, *params)\n# return update_result\n# # return now.strftime('%Y-%m-%d %H:%M:%S')\n# # 有什么问题??\n# a = orderTimeOut(289)\n# print(a)\n\n# 模拟订单超过48/12小时/7天\n# 只需将数据库过期时间设置为当前时间\ndef orderTimeOut(order_id, database=\"zbcf_injury_test\"):\n conn = pymysql.connect(host=\"rm-wz97oujls3998784i.mysql.rds.aliyuncs.com\", user=\"testuser\",\n password=\"testuser@2018\", database=database, charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n now = datetime.datetime.now()\n now = now.strftime('%Y-%m-%d %H:%M:%S')\n sql = \"UPDATE t_auth_info t SET end_effect_time = '%s' WHERE t.operation_id = (SELECT id from t_operation where order_id = '%s')\" % (now, order_id)\n effectRows = cursor.execute(sql)\n conn.commit()\n print('make order time out!')\n cursor.close()\n conn.close()\n return effectRows\n# a = orderTimeOut(260)\n# print(a)\ndef sleep(num):\n time.sleep(num)\n\n# 依据order_Id查询登录码\ndef queryLoginNum(orderId):\n sql 
= \"SELECT token from t_auth_info t where t.operation_id = (SELECT id from t_operation where order_id = '%s') and t.del_flag = '0'\"\n query_result = query_mysql(sql, orderId)\n print(orderId)\n return query_result['token']\n\n# a = queryLoginNum(418)\n# print (a)\n\n\n# 依据operation_Id查询登录码\ndef opetationId_queryLoginNum(operation_Id):\n sql = \"SELECT token from t_auth_info where operation_id = '%s' and del_flag = '0'\"\n query_result = query_mysql(sql, operation_Id)\n return query_result['token']\n\n# a = queryLoginNum(290)\n# print (a)\n\n\n# 返回orderId的方法\ndef queryOrderId():\n pass\n\n\n# 返回operationId的方法\ndef queryOperationId():\n pass\n\n# 查询订单状态\ndef queryOrderStatus():\n pass\n\n\n# # 登录PC端获取token,拼接到headers中\n# def setup_hook_token(request):\n# #print(request)\n# url_path=\"http://testrenshang.cias.cn/injury/user/pc/login\"\n# header={\"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\"}\n# payload={\"loginName\": \"haadmin003\", \"loginPass\": \"Y2lhczEyMzQ1Ng==\", \"verifyCode\": \"tubd\"}\n# req=requests.post(url=url_path, headers=header, params=payload).json()\n# token=req['data']['token']\n# request[\"headers\"]['token']=token\n# # print(token,'\\n', req)\n# # print(request)\n# # request = {'headers':{'Content-Type': 'application/json;charset=UTF-8', 'method': 'GET', 'url': '$uri', 'token': '$token'}}\n# # setup_hook_token(request)\n\n# 登录H5端获取token\ndef getH5Token(accessCode):\n url = \"http://testrsapp.cias.cn/injury/user/h5/login\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\"}\n data = {\"accessCode\": accessCode}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n\n# a = getH5Token('31583310')\n# print(a)\n\n# 登录Web端获取token\ndef getWebToken(accessCode):\n url = \"http://testrsapp.cias.cn/injury/user/pc/login\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\"}\n data = {\"loginName\": \"haadmin003\", \"loginPass\": \"Y2lhczEyMzQ1Ng==\", \"verifyCode\": \"tubd\"}\n req = requests.post(url=url, headers=headers, data=data).json()\n return req['data']['token']\n\n# a = getWebToken('31583310')\n# print(a)\n\n\n#",
"step-ids": [
10,
13,
15,
17,
18
]
}
|
[
10,
13,
15,
17,
18
] |
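A side note on the helpers in the record above: query_mysql forwards *params straight to cursor.execute, and pymysql quotes and escapes %s placeholders itself, so a parameterized call looks like the sketch below (the id values are hypothetical):
row = query_mysql(
    "SELECT token FROM t_auth_info WHERE operation_id = %s AND del_flag = %s",
    102, '0',
)
token = row['token'] if row else None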
from nodes.Value import Value
class Number(Value):
def __init__(self, number: int):
if abs(number) > 2 ** 31:
raise SyntaxError(str(number) + ' number is out of range')
self.number = number
def __str__(self):
return str(self.number)
|
normal
|
{
"blob_id": "7da274803de80f2864471d00c9d15aff1103372f",
"index": 3648,
"step-1": "<mask token>\n\n\nclass Number(Value):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Number(Value):\n <mask token>\n\n def __str__(self):\n return str(self.number)\n",
"step-3": "<mask token>\n\n\nclass Number(Value):\n\n def __init__(self, number: int):\n if abs(number) > 2 ** 31:\n raise SyntaxError(str(number) + ' number is out of range')\n self.number = number\n\n def __str__(self):\n return str(self.number)\n",
"step-4": "from nodes.Value import Value\n\n\nclass Number(Value):\n\n def __init__(self, number: int):\n if abs(number) > 2 ** 31:\n raise SyntaxError(str(number) + ' number is out of range')\n self.number = number\n\n def __str__(self):\n return str(self.number)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
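A minimal usage sketch for the Number node above; the module path is an assumption based on the nodes package seen in the import:
from nodes.Number import Number  # hypothetical module path within the nodes package

n = Number(42)
print(n)             # prints: 42
n = Number(2 ** 32)  # raises SyntaxError: 4294967296 number is out of range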
# myapp/serializers.py
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from .models import *
# Serializers define the API representation.
class GeneralSerializer(serializers.ModelSerializer):
class Meta:
model = None
fields = '__all__'
class V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = V2OfUsers
fields = ('firstname', 'lastname', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User(
email=validated_data['email'],
username=validated_data['username']
)
user.set_password(validated_data['password'])
user.save()
Token.objects.create(user=user)
return user
class MeasurementsSerializer(serializers.ModelSerializer):
class Meta:
model = Measurements
fields = '__all__'
def __init__(self, *args, **kwargs):
super(MeasurementsSerializer, self).__init__(*args, **kwargs)
request = self.context.get("request")
if request and request.query_params.get('fields'):
fields = request.query_params.get('fields')
if fields:
fields = fields.split(',')
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
# Serializer for Counting Providers
# and Network Type e.g 2G, 3G, 4G
class CountSerializer(serializers.Serializer):
key = serializers.CharField(max_length=20)
value = serializers.IntegerField()
# Serializer for Mobile Operating System
class OperatingSystemSerializer(serializers.ModelSerializer):
value = serializers.CharField(max_length=30)
key = serializers.CharField(source='versionname', max_length=30)
class Meta:
model = Measurements
fields = ('key', 'value')
# Serializer for Vendors
class VendorsSerializer(serializers.ModelSerializer):
value = serializers.CharField(max_length=30)
key = serializers.CharField(source='devicemanufacturer', max_length=30)
class Meta:
model = Measurements
fields = ('key', 'value')
# General Serializer for DownLink and UpLink for all
# Providers and Network Types with date range parameters
class GlobalSerializer(serializers.Serializer):
key = serializers.CharField(max_length=20)
avg = serializers.IntegerField()
min = serializers.IntegerField()
max = serializers.IntegerField()
|
normal
|
{
"blob_id": "44cbe1face91d3ac7edcd93d0b470bce90c8b674",
"index": 2916,
"step-1": "<mask token>\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n <mask token>\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-2": "<mask token>\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get('request')\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in (existing - allowed):\n self.fields.pop(field_name)\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-3": "<mask token>\n\n\nclass V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = V2OfUsers\n fields = 'firstname', 'lastname', 'username', 'email', 'password'\n extra_kwargs = {'password': {'write_only': True}}\n <mask token>\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get('request')\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in (existing - allowed):\n self.fields.pop(field_name)\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-4": "<mask token>\n\n\nclass V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = V2OfUsers\n fields = 'firstname', 'lastname', 'username', 'email', 'password'\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n user = User(email=validated_data['email'], username=validated_data[\n 'username'])\n user.set_password(validated_data['password'])\n user.save()\n Token.objects.create(user=user)\n return user\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get('request')\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in (existing - allowed):\n self.fields.pop(field_name)\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-5": "# myapp/serializers.py\nfrom rest_framework import serializers\nfrom rest_framework.authtoken.models import Token\nfrom .models import *\n\n\n# Serializers define the API representation.\nclass GeneralSerializer(serializers.ModelSerializer):\n class Meta:\n model = None\n fields = '__all__'\n\n\nclass V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = V2OfUsers\n fields = ('firstname', 'lastname', 'username', 'email', 'password')\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n user = User(\n email=validated_data['email'],\n username=validated_data['username']\n )\n user.set_password(validated_data['password'])\n user.save()\n Token.objects.create(user=user)\n return user\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get(\"request\")\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n\n# Serializer for Counting Providers\n# and Network Type e.g 2G, 3G, 4G\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n# Serializer for Mobile Operating System\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n class Meta:\n model = Measurements\n fields = ('key', 'value')\n\n\n# Serializer for Vendors\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n class Meta:\n model = Measurements\n fields = ('key', 'value')\n\n\n# General Serializer for DownLink and UpLink for all\n# Providers and Network Types with date range parameters\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-ids": [
9,
10,
11,
12,
15
]
}
|
[
9,
10,
11,
12,
15
] |
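A short sketch of the dynamic field trimming in MeasurementsSerializer above; the request object is assumed to come from an ordinary Django REST Framework view:
# With a request like GET /measurements/?fields=aroma,flavor the serializer
# drops every other declared field before rendering.
serializer = MeasurementsSerializer(
    Measurements.objects.all(), many=True, context={'request': request}
)
data = serializer.data  # each item now carries only 'aroma' and 'flavor'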
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_file_ontology(file_path):
"""
Method that, given a file, returns its URI.
This method is in a separate file in case we want to extract additional metadata if required
Parameters
----------
@param file_path: path of the candidate ontology
Returns
-------
@return: The URI of the target ontology (if there is one)
"""
try:
g = Graph()
g.parse(file_path)
q1 = prepareQuery(
"""
SELECT ?onto
WHERE {
?onto a <http://www.w3.org/2002/07/owl#Ontology>.
}
"""
)
for r in g.query(q1):
return r.onto
except Exception:
pass
<|reserved_special_token_1|>
from rdflib import Graph
from rdflib.plugins.sparql import prepareQuery
def is_file_ontology(file_path):
"""
Method that, given a file, returns its URI.
This method is in a separate file in case we want to extract additional metadata if required
Parameters
----------
@param file_path: path of the candidate ontology
Returns
-------
@return: The URI of the target ontology (if there is one)
"""
try:
g = Graph()
g.parse(file_path)
q1 = prepareQuery(
"""
SELECT ?onto
WHERE {
?onto a <http://www.w3.org/2002/07/owl#Ontology>.
}
"""
)
for r in g.query(q1):
return r.onto
except Exception:
pass
<|reserved_special_token_1|>
from rdflib import Graph
from rdflib.plugins.sparql import prepareQuery
def is_file_ontology(file_path):
"""
Method that, given a file, returns its URI.
This method is in a separate file in case we want to extract additional metadata if required
Parameters
----------
@param file_path: path of the candidate ontology
Returns
-------
@return: The URI of the target ontology (if there is one)
"""
# load in rdf lib
try:
g = Graph()
g.parse(file_path)
q1 = prepareQuery('''
SELECT ?onto
WHERE {
?onto a <http://www.w3.org/2002/07/owl#Ontology>.
}
''')
# TO DO: extract title, preferred ns.
# there should be only one ontology per file
for r in g.query(q1):
# print("Found that %s is an ontology" % file_path)
return r.onto
except Exception:
# If the candidate file could not be read, pass
pass
|
flexible
|
{
"blob_id": "c327f8f7aece1a9c25079613809df52e9a8e7a52",
"index": 8763,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_file_ontology(file_path):\n \"\"\"\n Method that, given a file, returns its URI.\n This method is in a separate file in case we want to extract additional metadata if required\n Parameters\n ----------\n @param file_path: path of the candidate ontology\n\n Returns\n -------\n @return: The URI of the target ontology (if there is one)\n \"\"\"\n try:\n g = Graph()\n g.parse(file_path)\n q1 = prepareQuery(\n \"\"\"\n SELECT ?onto\n WHERE { \n ?onto a <http://www.w3.org/2002/07/owl#Ontology>. \n }\n \"\"\"\n )\n for r in g.query(q1):\n return r.onto\n except Exception:\n pass\n",
"step-3": "from rdflib import Graph\nfrom rdflib.plugins.sparql import prepareQuery\n\n\ndef is_file_ontology(file_path):\n \"\"\"\n Method that, given a file, returns its URI.\n This method is in a separate file in case we want to extract additional metadata if required\n Parameters\n ----------\n @param file_path: path of the candidate ontology\n\n Returns\n -------\n @return: The URI of the target ontology (if there is one)\n \"\"\"\n try:\n g = Graph()\n g.parse(file_path)\n q1 = prepareQuery(\n \"\"\"\n SELECT ?onto\n WHERE { \n ?onto a <http://www.w3.org/2002/07/owl#Ontology>. \n }\n \"\"\"\n )\n for r in g.query(q1):\n return r.onto\n except Exception:\n pass\n",
"step-4": "from rdflib import Graph\nfrom rdflib.plugins.sparql import prepareQuery\n\n\ndef is_file_ontology(file_path):\n \"\"\"\n Method that, given a file, returns its URI.\n This method is in a separate file in case we want to extract additional metadata if required\n Parameters\n ----------\n @param file_path: path of the candidate ontology\n\n Returns\n -------\n @return: The URI of the target ontology (if there is one)\n \"\"\"\n # load in rdf lib\n try:\n g = Graph()\n g.parse(file_path)\n q1 = prepareQuery('''\n SELECT ?onto\n WHERE { \n ?onto a <http://www.w3.org/2002/07/owl#Ontology>. \n }\n ''')\n # TO DO: extract title, preferred ns.\n # there should be only one ontology per file\n for r in g.query(q1):\n # print(\"Found that %s is an ontology\" % file_path)\n return r.onto\n except Exception:\n # If the candidate file could not be read, pass\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
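A quick usage sketch for is_file_ontology above; the file path is hypothetical:
uri = is_file_ontology('ontologies/example.ttl')  # hypothetical path
if uri is not None:
    print('Declared ontology URI:', uri)
else:
    print('No owl:Ontology declaration found, or the file could not be parsed')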
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(3):
numList[i] = int(sys.stdin.readline())
<|reserved_special_token_0|>
for i in intList:
print(resultList.count(str(i)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
numList = list(range(3))
for i in range(3):
numList[i] = int(sys.stdin.readline())
result = numList[0] * numList[1] * numList[2]
resultList = list(str(result))
intList = list(range(10))
for i in intList:
print(resultList.count(str(i)))
<|reserved_special_token_1|>
import sys
numList = list(range(3))
for i in range(3):
numList[i] = int(sys.stdin.readline())
result = numList[0] * numList[1] * numList[2]
resultList = list(str(result))
intList = list(range(10))
for i in intList:
print(resultList.count(str(i)))
|
flexible
|
{
"blob_id": "c3de6cd76ca7180a1a4d236bb2a6a18f7594f38b",
"index": 1304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(3):\n numList[i] = int(sys.stdin.readline())\n<mask token>\nfor i in intList:\n print(resultList.count(str(i)))\n",
"step-3": "<mask token>\nnumList = list(range(3))\nfor i in range(3):\n numList[i] = int(sys.stdin.readline())\nresult = numList[0] * numList[1] * numList[2]\nresultList = list(str(result))\nintList = list(range(10))\nfor i in intList:\n print(resultList.count(str(i)))\n",
"step-4": "import sys\nnumList = list(range(3))\nfor i in range(3):\n numList[i] = int(sys.stdin.readline())\nresult = numList[0] * numList[1] * numList[2]\nresultList = list(str(result))\nintList = list(range(10))\nfor i in intList:\n print(resultList.count(str(i)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
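For reference, a worked run of the script above: with inputs 150, 266 and 427 the product is 17037300, which the following one-liner confirms digit by digit:
digits = str(150 * 266 * 427)                     # '17037300'
print([digits.count(str(d)) for d in range(10)])  # [3, 1, 0, 2, 0, 0, 0, 2, 0, 0]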
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
"""所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行"""
app = QApplication(sys.argv)
"""Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用"""
w = QWidget()
"""resize()方法调整了widget组件的大小。它现在是250px宽,150px高。"""
w.resize(500, 150)
"""move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。"""
w.move(300, 300)
"""setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。"""
w.setWindowTitle('Simple')
"""show()方法在屏幕上显示出widget。一个widget对象在内存中创建"""
w.show()
"""sys.exit()方法确保一个不留垃圾的退出"""
sys.exit(app.exec_())
<|reserved_special_token_1|>
import sys
from PyQt5.QtWidgets import QApplication, QWidget
if __name__ == '__main__':
"""所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行"""
app = QApplication(sys.argv)
"""Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用"""
w = QWidget()
"""resize()方法调整了widget组件的大小。它现在是250px宽,150px高。"""
w.resize(500, 150)
"""move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。"""
w.move(300, 300)
"""setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。"""
w.setWindowTitle('Simple')
"""show()方法在屏幕上显示出widget。一个widget对象在内存中创建"""
w.show()
"""sys.exit()方法确保一个不留垃圾的退出"""
sys.exit(app.exec_())
<|reserved_special_token_1|>
# -*- encoding: utf-8 -*-
# @Version : 1.0
# @Time : 2018/8/29 9:59
# @Author : wanghuodong
# @note   : Create a simple window
import sys
from PyQt5.QtWidgets import QApplication, QWidget
if __name__ == '__main__':
    '''Every PyQt5 application must create an application (Application) object. The sys.argv parameter is a list of arguments from the command line. Python scripts can be run from the shell.'''
    app = QApplication(sys.argv)
    '''The QWidget component is the base class of all user interface classes in PyQt5. We use the default constructor of QWidget here. The default constructor has no parent, and a widget with no parent is used as a window.'''
    w = QWidget()
    '''The resize() method resizes the widget. It is now 500px wide and 150px high.'''
    w.resize(500, 150)
    '''The move() method moves the widget to a position on the screen with coordinates x=300, y=300.'''
    w.move(300, 300)
    '''setWindowTitle() sets the title of our window. The title is shown in the title bar.'''
    w.setWindowTitle('Simple')
    '''The show() method displays the widget on the screen. A widget object is first created in memory.'''
    w.show()
    '''The sys.exit() method ensures a clean exit.'''
    sys.exit(app.exec_())
|
flexible
|
{
"blob_id": "6ff300bbd7866466d1992445e46c5ee54f73d0d7",
"index": 9167,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n \"\"\"所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行\"\"\"\n app = QApplication(sys.argv)\n \"\"\"Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用\"\"\"\n w = QWidget()\n \"\"\"resize()方法调整了widget组件的大小。它现在是250px宽,150px高。\"\"\"\n w.resize(500, 150)\n \"\"\"move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。\"\"\"\n w.move(300, 300)\n \"\"\"setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。\"\"\"\n w.setWindowTitle('Simple')\n \"\"\"show()方法在屏幕上显示出widget。一个widget对象在内存中创建\"\"\"\n w.show()\n \"\"\"sys.exit()方法确保一个不留垃圾的退出\"\"\"\n sys.exit(app.exec_())\n",
"step-3": "import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nif __name__ == '__main__':\n \"\"\"所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行\"\"\"\n app = QApplication(sys.argv)\n \"\"\"Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用\"\"\"\n w = QWidget()\n \"\"\"resize()方法调整了widget组件的大小。它现在是250px宽,150px高。\"\"\"\n w.resize(500, 150)\n \"\"\"move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。\"\"\"\n w.move(300, 300)\n \"\"\"setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。\"\"\"\n w.setWindowTitle('Simple')\n \"\"\"show()方法在屏幕上显示出widget。一个widget对象在内存中创建\"\"\"\n w.show()\n \"\"\"sys.exit()方法确保一个不留垃圾的退出\"\"\"\n sys.exit(app.exec_())\n",
"step-4": "# -*- encoding: utf-8 -*-\n# @Version : 1.0 \n# @Time : 2018/8/29 9:59\n# @Author : wanghuodong \n# @note : 生成一个简单窗口\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\n\nif __name__ == '__main__':\n\n '''所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行'''\n app = QApplication(sys.argv)\n\n '''Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用'''\n w = QWidget()\n '''resize()方法调整了widget组件的大小。它现在是250px宽,150px高。'''\n w.resize(500, 150)\n '''move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。'''\n w.move(300, 300)\n '''setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。'''\n w.setWindowTitle('Simple')\n '''show()方法在屏幕上显示出widget。一个widget对象在内存中创建'''\n w.show()\n\n '''sys.exit()方法确保一个不留垃圾的退出'''\n sys.exit(app.exec_())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
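One small note on the window example above: recent PyQt5 releases also expose exec() without the trailing underscore (the underscore form predates Python 3, where exec stopped being a keyword), so the final line can equivalently be written as:
sys.exit(app.exec())  # alias for app.exec_() in current PyQt5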
<|reserved_special_token_0|>
class BeanAttributes(Base):
""" Defines the data model for the table `bean_attributes`. """
__tablename__ = 'bean_attributes'
id = Column(Integer, primary_key=True)
species = Column(String(100), unique=False, nullable=True)
owner = Column(String(100), unique=False, nullable=True)
country = Column(String(100), unique=False, nullable=True)
farm_name = Column(String(100), unique=False, nullable=True)
company = Column(String(100), unique=False, nullable=True)
region = Column(String(100), unique=False, nullable=True)
producer = Column(String(100), unique=False, nullable=True)
grading_date = Column(String(100), unique=False, nullable=True)
processing_method = Column(Text, unique=False, nullable=True)
aroma = Column(Float, unique=False, nullable=True)
flavor = Column(Float, unique=False, nullable=True)
aftertaste = Column(Float, unique=False, nullable=True)
acidity = Column(Float, unique=False, nullable=True)
body = Column(Float, unique=False, nullable=True)
balance = Column(Float, unique=False, nullable=True)
uniformity = Column(Float, unique=False, nullable=True)
cleancup = Column(Float, unique=False, nullable=True)
sweetness = Column(Float, unique=False, nullable=True)
total_cup_point = Column(Float, unique=False, nullable=True)
moisture = Column(Float, unique=False, nullable=True)
color = Column(String(100), unique=False, nullable=True)
cluster = Column(Integer, unique=False, nullable=True)
def __repr__(self):
return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
"""Persist the data to database.
Args:
engine_string (`str`): Engine string for SQLAlchemy.
Returns:
None.
"""
engine = sql.create_engine(engine_string)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
if config.LOCAL_DB_FLAG:
try:
session.execute('DELETE FROM msia_db.bean_attributes')
except:
pass
else:
try:
session.execute('DELETE FROM bean_attributes')
except:
pass
raw_data = pd.read_csv(config.DATA_TABLE_PATH)
raw_data = raw_data.replace(np.nan, '', regex=True)
try:
for i in range(raw_data.shape[0]):
bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']
), species=str(raw_data.iloc[i]['Species']), owner=str(
raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][
'Country.of.Origin']), farm_name=str(raw_data.iloc[i][
'Farm.Name']), company=str(raw_data.iloc[i]['Company']),
region=str(raw_data.iloc[i]['Region']), producer=str(
raw_data.iloc[i]['Producer']), grading_date=str(raw_data.
iloc[i]['Grading.Date']), processing_method=str(raw_data.
iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]
['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),
aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=
float(raw_data.iloc[i]['Acidity']), body=float(raw_data.
iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']
), uniformity=float(raw_data.iloc[i]['Uniformity']),
cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=
float(raw_data.iloc[i]['Sweetness']), total_cup_point=float
(raw_data.iloc[i]['Total.Cup.Points']), moisture=float(
raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][
'Color']), cluster=int(raw_data.iloc[i]['cluster']))
session.add(bean_row)
logger.debug('Row %d added to table ' % i)
session.commit()
except sql.exc.IntegrityError:
logger.error('Duplicated coffee bean')
except Exception as e:
logger.error('Incorrect credentials, access denied', e)
finally:
session.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('./config')
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO, format=
'%(name)s - %(levelname)s - %(asctime)s - %(message)s')
<|reserved_special_token_0|>
class BeanAttributes(Base):
""" Defines the data model for the table `bean_attributes`. """
__tablename__ = 'bean_attributes'
id = Column(Integer, primary_key=True)
species = Column(String(100), unique=False, nullable=True)
owner = Column(String(100), unique=False, nullable=True)
country = Column(String(100), unique=False, nullable=True)
farm_name = Column(String(100), unique=False, nullable=True)
company = Column(String(100), unique=False, nullable=True)
region = Column(String(100), unique=False, nullable=True)
producer = Column(String(100), unique=False, nullable=True)
grading_date = Column(String(100), unique=False, nullable=True)
processing_method = Column(Text, unique=False, nullable=True)
aroma = Column(Float, unique=False, nullable=True)
flavor = Column(Float, unique=False, nullable=True)
aftertaste = Column(Float, unique=False, nullable=True)
acidity = Column(Float, unique=False, nullable=True)
body = Column(Float, unique=False, nullable=True)
balance = Column(Float, unique=False, nullable=True)
uniformity = Column(Float, unique=False, nullable=True)
cleancup = Column(Float, unique=False, nullable=True)
sweetness = Column(Float, unique=False, nullable=True)
total_cup_point = Column(Float, unique=False, nullable=True)
moisture = Column(Float, unique=False, nullable=True)
color = Column(String(100), unique=False, nullable=True)
cluster = Column(Integer, unique=False, nullable=True)
def __repr__(self):
return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
"""Persist the data to database.
Args:
engine_string (`str`): Engine string for SQLAlchemy.
Returns:
None.
"""
engine = sql.create_engine(engine_string)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
if config.LOCAL_DB_FLAG:
try:
session.execute('DELETE FROM msia_db.bean_attributes')
except:
pass
else:
try:
session.execute('DELETE FROM bean_attributes')
except:
pass
raw_data = pd.read_csv(config.DATA_TABLE_PATH)
raw_data = raw_data.replace(np.nan, '', regex=True)
try:
for i in range(raw_data.shape[0]):
bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']
), species=str(raw_data.iloc[i]['Species']), owner=str(
raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][
'Country.of.Origin']), farm_name=str(raw_data.iloc[i][
'Farm.Name']), company=str(raw_data.iloc[i]['Company']),
region=str(raw_data.iloc[i]['Region']), producer=str(
raw_data.iloc[i]['Producer']), grading_date=str(raw_data.
iloc[i]['Grading.Date']), processing_method=str(raw_data.
iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]
['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),
aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=
float(raw_data.iloc[i]['Acidity']), body=float(raw_data.
iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']
), uniformity=float(raw_data.iloc[i]['Uniformity']),
cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=
float(raw_data.iloc[i]['Sweetness']), total_cup_point=float
(raw_data.iloc[i]['Total.Cup.Points']), moisture=float(
raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][
'Color']), cluster=int(raw_data.iloc[i]['cluster']))
session.add(bean_row)
logger.debug('Row %d added to table ' % i)
session.commit()
except sql.exc.IntegrityError:
logger.error('Duplicated coffee bean')
except Exception as e:
logger.error('Incorrect credentials, access denied', e)
finally:
session.close()
if __name__ == '__main__':
conn_type = 'mysql+pymysql'
user = os.environ.get('MYSQL_USER')
password = os.environ.get('MYSQL_PASSWORD')
host = os.environ.get('MYSQL_HOST')
port = os.environ.get('MYSQL_PORT')
database = os.environ.get('DATABASE_NAME')
local_database_path = config.LOCAL_DATABASE_PATH
if config.SQLALCHEMY_DATABASE_URI is None:
if config.LOCAL_DB_FLAG:
engine_string = 'sqlite:///{}'.format(local_database_path)
else:
engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,
password, host, port, database)
else:
engine_string = config.SQLALCHEMY_DATABASE_URI
try:
engine_string = 'sqlite:///data/bean.db'
persist_to_db(engine_string)
logger.info('Data successfully persisted into the database')
except Exception as e:
logger.error(e)
sys.exit(1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('./config')
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO, format=
'%(name)s - %(levelname)s - %(asctime)s - %(message)s')
logger = logging.getLogger(__file__)
Base = declarative_base()
class BeanAttributes(Base):
""" Defines the data model for the table `bean_attributes`. """
__tablename__ = 'bean_attributes'
id = Column(Integer, primary_key=True)
species = Column(String(100), unique=False, nullable=True)
owner = Column(String(100), unique=False, nullable=True)
country = Column(String(100), unique=False, nullable=True)
farm_name = Column(String(100), unique=False, nullable=True)
company = Column(String(100), unique=False, nullable=True)
region = Column(String(100), unique=False, nullable=True)
producer = Column(String(100), unique=False, nullable=True)
grading_date = Column(String(100), unique=False, nullable=True)
processing_method = Column(Text, unique=False, nullable=True)
aroma = Column(Float, unique=False, nullable=True)
flavor = Column(Float, unique=False, nullable=True)
aftertaste = Column(Float, unique=False, nullable=True)
acidity = Column(Float, unique=False, nullable=True)
body = Column(Float, unique=False, nullable=True)
balance = Column(Float, unique=False, nullable=True)
uniformity = Column(Float, unique=False, nullable=True)
cleancup = Column(Float, unique=False, nullable=True)
sweetness = Column(Float, unique=False, nullable=True)
total_cup_point = Column(Float, unique=False, nullable=True)
moisture = Column(Float, unique=False, nullable=True)
color = Column(String(100), unique=False, nullable=True)
cluster = Column(Integer, unique=False, nullable=True)
def __repr__(self):
return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
"""Persist the data to database.
Args:
engine_string (`str`): Engine string for SQLAlchemy.
Returns:
None.
"""
engine = sql.create_engine(engine_string)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
if config.LOCAL_DB_FLAG:
try:
session.execute('DELETE FROM msia_db.bean_attributes')
except:
pass
else:
try:
session.execute('DELETE FROM bean_attributes')
except:
pass
raw_data = pd.read_csv(config.DATA_TABLE_PATH)
raw_data = raw_data.replace(np.nan, '', regex=True)
try:
for i in range(raw_data.shape[0]):
bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']
), species=str(raw_data.iloc[i]['Species']), owner=str(
raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][
'Country.of.Origin']), farm_name=str(raw_data.iloc[i][
'Farm.Name']), company=str(raw_data.iloc[i]['Company']),
region=str(raw_data.iloc[i]['Region']), producer=str(
raw_data.iloc[i]['Producer']), grading_date=str(raw_data.
iloc[i]['Grading.Date']), processing_method=str(raw_data.
iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]
['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),
aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=
float(raw_data.iloc[i]['Acidity']), body=float(raw_data.
iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']
), uniformity=float(raw_data.iloc[i]['Uniformity']),
cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=
float(raw_data.iloc[i]['Sweetness']), total_cup_point=float
(raw_data.iloc[i]['Total.Cup.Points']), moisture=float(
raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][
'Color']), cluster=int(raw_data.iloc[i]['cluster']))
session.add(bean_row)
logger.debug('Row %d added to table ' % i)
session.commit()
except sql.exc.IntegrityError:
logger.error('Duplicated coffee bean')
except Exception as e:
logger.error('Incorrect credentials, access denied', e)
finally:
session.close()
if __name__ == '__main__':
conn_type = 'mysql+pymysql'
user = os.environ.get('MYSQL_USER')
password = os.environ.get('MYSQL_PASSWORD')
host = os.environ.get('MYSQL_HOST')
port = os.environ.get('MYSQL_PORT')
database = os.environ.get('DATABASE_NAME')
local_database_path = config.LOCAL_DATABASE_PATH
if config.SQLALCHEMY_DATABASE_URI is None:
if config.LOCAL_DB_FLAG:
engine_string = 'sqlite:///{}'.format(local_database_path)
else:
engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,
password, host, port, database)
else:
engine_string = config.SQLALCHEMY_DATABASE_URI
try:
engine_string = 'sqlite:///data/bean.db'
persist_to_db(engine_string)
logger.info('Data successfully persisted into the database')
except Exception as e:
logger.error(e)
sys.exit(1)
<|reserved_special_token_1|>
import os
import sys
import logging.config
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Float, String, Text, Integer
import pandas as pd
import numpy as np
sys.path.append('./config')
import config
logging.basicConfig(level=logging.INFO, format=
'%(name)s - %(levelname)s - %(asctime)s - %(message)s')
logger = logging.getLogger(__file__)
Base = declarative_base()
class BeanAttributes(Base):
""" Defines the data model for the table `bean_attributes`. """
__tablename__ = 'bean_attributes'
id = Column(Integer, primary_key=True)
species = Column(String(100), unique=False, nullable=True)
owner = Column(String(100), unique=False, nullable=True)
country = Column(String(100), unique=False, nullable=True)
farm_name = Column(String(100), unique=False, nullable=True)
company = Column(String(100), unique=False, nullable=True)
region = Column(String(100), unique=False, nullable=True)
producer = Column(String(100), unique=False, nullable=True)
grading_date = Column(String(100), unique=False, nullable=True)
processing_method = Column(Text, unique=False, nullable=True)
aroma = Column(Float, unique=False, nullable=True)
flavor = Column(Float, unique=False, nullable=True)
aftertaste = Column(Float, unique=False, nullable=True)
acidity = Column(Float, unique=False, nullable=True)
body = Column(Float, unique=False, nullable=True)
balance = Column(Float, unique=False, nullable=True)
uniformity = Column(Float, unique=False, nullable=True)
cleancup = Column(Float, unique=False, nullable=True)
sweetness = Column(Float, unique=False, nullable=True)
total_cup_point = Column(Float, unique=False, nullable=True)
moisture = Column(Float, unique=False, nullable=True)
color = Column(String(100), unique=False, nullable=True)
cluster = Column(Integer, unique=False, nullable=True)
def __repr__(self):
return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
"""Persist the data to database.
Args:
engine_string (`str`): Engine string for SQLAlchemy.
Returns:
None.
"""
engine = sql.create_engine(engine_string)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
if config.LOCAL_DB_FLAG:
try:
session.execute('DELETE FROM msia_db.bean_attributes')
except:
pass
else:
try:
session.execute('DELETE FROM bean_attributes')
except:
pass
raw_data = pd.read_csv(config.DATA_TABLE_PATH)
raw_data = raw_data.replace(np.nan, '', regex=True)
try:
for i in range(raw_data.shape[0]):
bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']
), species=str(raw_data.iloc[i]['Species']), owner=str(
raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][
'Country.of.Origin']), farm_name=str(raw_data.iloc[i][
'Farm.Name']), company=str(raw_data.iloc[i]['Company']),
region=str(raw_data.iloc[i]['Region']), producer=str(
raw_data.iloc[i]['Producer']), grading_date=str(raw_data.
iloc[i]['Grading.Date']), processing_method=str(raw_data.
iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]
['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),
aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=
float(raw_data.iloc[i]['Acidity']), body=float(raw_data.
iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']
), uniformity=float(raw_data.iloc[i]['Uniformity']),
cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=
float(raw_data.iloc[i]['Sweetness']), total_cup_point=float
(raw_data.iloc[i]['Total.Cup.Points']), moisture=float(
raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][
'Color']), cluster=int(raw_data.iloc[i]['cluster']))
session.add(bean_row)
logger.debug('Row %d added to table ' % i)
session.commit()
except sql.exc.IntegrityError:
logger.error('Duplicated coffee bean')
except Exception as e:
logger.error('Incorrect credentials, access denied', e)
finally:
session.close()
if __name__ == '__main__':
conn_type = 'mysql+pymysql'
user = os.environ.get('MYSQL_USER')
password = os.environ.get('MYSQL_PASSWORD')
host = os.environ.get('MYSQL_HOST')
port = os.environ.get('MYSQL_PORT')
database = os.environ.get('DATABASE_NAME')
local_database_path = config.LOCAL_DATABASE_PATH
if config.SQLALCHEMY_DATABASE_URI is None:
if config.LOCAL_DB_FLAG:
engine_string = 'sqlite:///{}'.format(local_database_path)
else:
engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,
password, host, port, database)
else:
engine_string = config.SQLALCHEMY_DATABASE_URI
try:
engine_string = 'sqlite:///data/bean.db'
persist_to_db(engine_string)
logger.info('Data successfully persisted into the database')
except Exception as e:
logger.error(e)
sys.exit(1)
<|reserved_special_token_1|>
import os
import sys
import logging.config
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Float, String, Text, Integer
import pandas as pd
import numpy as np
sys.path.append('./config')
import config
logging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')
logger = logging.getLogger(__file__)
Base = declarative_base()
class BeanAttributes(Base):
""" Defines the data model for the table `bean_attributes`. """
__tablename__ = 'bean_attributes'
id = Column(Integer, primary_key=True)
species = Column(String(100), unique=False, nullable=True)
owner = Column(String(100), unique=False, nullable=True)
country = Column(String(100), unique=False, nullable=True)
farm_name = Column(String(100), unique=False, nullable=True)
company = Column(String(100), unique=False, nullable=True)
region = Column(String(100), unique=False, nullable=True)
producer = Column(String(100), unique=False, nullable=True)
grading_date = Column(String(100), unique=False, nullable=True)
processing_method = Column(Text, unique=False, nullable=True)
aroma = Column(Float, unique=False, nullable=True)
flavor = Column(Float, unique=False, nullable=True)
aftertaste = Column(Float, unique=False, nullable=True)
acidity = Column(Float, unique=False, nullable=True)
body = Column(Float, unique=False, nullable=True)
balance = Column(Float, unique=False, nullable=True)
uniformity = Column(Float, unique=False, nullable=True)
cleancup = Column(Float, unique=False, nullable=True)
sweetness = Column(Float, unique=False, nullable=True)
total_cup_point = Column(Float, unique=False, nullable=True)
moisture = Column(Float, unique=False, nullable=True)
color = Column(String(100), unique=False, nullable=True)
cluster = Column(Integer, unique=False, nullable=True)
def __repr__(self):
return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
"""Persist the data to database.
Args:
engine_string (`str`): Engine string for SQLAlchemy.
Returns:
None.
"""
engine = sql.create_engine(engine_string)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
# Delete all existing records in the table
if config.LOCAL_DB_FLAG:
try:
session.execute('''DELETE FROM msia_db.bean_attributes''')
except:
pass
else:
try:
session.execute('''DELETE FROM bean_attributes''')
except:
pass
# Read the data table and persist it into the database
raw_data = pd.read_csv(config.DATA_TABLE_PATH)
raw_data = raw_data.replace(np.nan, '', regex=True)
try:
for i in range(raw_data.shape[0]):
bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']),
species=str(raw_data.iloc[i]['Species']),
owner=str(raw_data.iloc[i]['Owner.1']),
country=str(raw_data.iloc[i]['Country.of.Origin']),
farm_name=str(raw_data.iloc[i]['Farm.Name']),
company=str(raw_data.iloc[i]['Company']),
region=str(raw_data.iloc[i]['Region']),
producer=str(raw_data.iloc[i]['Producer']),
grading_date=str(raw_data.iloc[i]['Grading.Date']),
processing_method=str(raw_data.iloc[i]['Processing.Method']),
aroma=float(raw_data.iloc[i]['Aroma']),
flavor=float(raw_data.iloc[i]['Flavor']),
aftertaste=float(raw_data.iloc[i]['Aftertaste']),
acidity=float(raw_data.iloc[i]['Acidity']),
body=float(raw_data.iloc[i]['Body']),
balance=float(raw_data.iloc[i]['Balance']),
uniformity=float(raw_data.iloc[i]['Uniformity']),
cleancup=float(raw_data.iloc[i]['Clean.Cup']),
sweetness=float(raw_data.iloc[i]['Sweetness']),
total_cup_point=float(raw_data.iloc[i]['Total.Cup.Points']),
moisture=float(raw_data.iloc[i]['Moisture']),
color=str(raw_data.iloc[i]['Color']),
cluster=int(raw_data.iloc[i]['cluster'])
)
session.add(bean_row)
logger.debug('Row %d added to table ' % i)
session.commit()
except sql.exc.IntegrityError: # Check primary key duplication
logger.error("Duplicated coffee bean")
except Exception as e:
logger.error("Incorrect credentials, access denied", e)
finally:
session.close()
if __name__ == "__main__":
# Obtain parameters from os
conn_type = "mysql+pymysql"
user = os.environ.get("MYSQL_USER")
password = os.environ.get("MYSQL_PASSWORD")
host = os.environ.get("MYSQL_HOST")
port = os.environ.get("MYSQL_PORT")
database = os.environ.get("DATABASE_NAME")
local_database_path = config.LOCAL_DATABASE_PATH
# If users wish to write to their own SQLALCHEMY_DATABASE_URI in the environment
if config.SQLALCHEMY_DATABASE_URI is None:
# Whether to create a local SQLite database or an AWS RDS database
if config.LOCAL_DB_FLAG:
engine_string = "sqlite:///{}".format(local_database_path)
else:
engine_string = "{}://{}:{}@{}:{}/{}".format(conn_type, user, password, host, port, database)
else:
engine_string = config.SQLALCHEMY_DATABASE_URI
    try:
        # engine_string = 'sqlite:///data/bean.db'  # uncomment to force a local SQLite file
        persist_to_db(engine_string)
logger.info("Data successfully persisted into the database")
except Exception as e:
logger.error(e)
sys.exit(1)
|
flexible
|
{
"blob_id": "76f2312a01bf8475220a9fcc16209faddfccd2ae",
"index": 9754,
"step-1": "<mask token>\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect 
credentials, access denied', e)\n finally:\n session.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('./config')\n<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\n<mask token>\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to 
table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-3": "<mask token>\nsys.path.append('./config')\n<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\nBase = declarative_base()\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n 
session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-4": "import os\nimport sys\nimport logging.config\nimport sqlalchemy as sql\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Float, String, Text, Integer\nimport pandas as pd\nimport numpy as np\nsys.path.append('./config')\nimport config\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\nBase = declarative_base()\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n 
cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-5": "import os\nimport sys\nimport logging.config\nimport sqlalchemy as sql\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Float, String, Text, Integer\nimport pandas as pd\nimport numpy as np\nsys.path.append('./config')\nimport config\n\nlogging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\n\nBase = declarative_base()\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n\n __tablename__ = 'bean_attributes'\n\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Delete all existing records in the table\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('''DELETE FROM msia_db.bean_attributes''')\n except:\n pass\n else:\n try:\n session.execute('''DELETE FROM bean_attributes''')\n except:\n pass\n\n # Read the data table and persist it into the database\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']),\n species=str(raw_data.iloc[i]['Species']),\n owner=str(raw_data.iloc[i]['Owner.1']),\n country=str(raw_data.iloc[i]['Country.of.Origin']),\n farm_name=str(raw_data.iloc[i]['Farm.Name']),\n company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']),\n producer=str(raw_data.iloc[i]['Producer']),\n grading_date=str(raw_data.iloc[i]['Grading.Date']),\n processing_method=str(raw_data.iloc[i]['Processing.Method']),\n aroma=float(raw_data.iloc[i]['Aroma']),\n flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']),\n acidity=float(raw_data.iloc[i]['Acidity']),\n body=float(raw_data.iloc[i]['Body']),\n 
balance=float(raw_data.iloc[i]['Balance']),\n uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']),\n sweetness=float(raw_data.iloc[i]['Sweetness']),\n total_cup_point=float(raw_data.iloc[i]['Total.Cup.Points']),\n moisture=float(raw_data.iloc[i]['Moisture']),\n color=str(raw_data.iloc[i]['Color']),\n cluster=int(raw_data.iloc[i]['cluster'])\n )\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError: # Check primary key duplication\n logger.error(\"Duplicated coffee bean\")\n except Exception as e:\n logger.error(\"Incorrect credentials, access denied\", e)\n finally:\n session.close()\n\n\nif __name__ == \"__main__\":\n\n # Obtain parameters from os\n conn_type = \"mysql+pymysql\"\n user = os.environ.get(\"MYSQL_USER\")\n password = os.environ.get(\"MYSQL_PASSWORD\")\n host = os.environ.get(\"MYSQL_HOST\")\n port = os.environ.get(\"MYSQL_PORT\")\n database = os.environ.get(\"DATABASE_NAME\")\n local_database_path = config.LOCAL_DATABASE_PATH\n\n # If users wish to write to their own SQLALCHEMY_DATABASE_URI in the environment\n if config.SQLALCHEMY_DATABASE_URI is None:\n # Whether to create a local SQLite database or an AWS RDS database\n if config.LOCAL_DB_FLAG:\n engine_string = \"sqlite:///{}\".format(local_database_path)\n else:\n engine_string = \"{}://{}:{}@{}:{}/{}\".format(conn_type, user, password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info(\"Data successfully persisted into the database\")\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
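
The row-by-row `session.add` loop above is correct but slow for large CSVs; below is a minimal sketch of the same load using pandas `itertuples` and SQLAlchemy's bulk API (the `BeanAttributes` model and `session` are assumed from the code above, and most columns are elided for brevity):

import pandas as pd

def bulk_persist(session, csv_path):
    """Build all ORM objects first, then insert them in one batched call."""
    df = pd.read_csv(csv_path)
    rows = [BeanAttributes(id=int(r[0]),          # first CSV column is the index ("Unnamed: 0")
                           species=str(r.Species),
                           aroma=float(r.Aroma))  # remaining fields elided in this sketch
            for r in df.itertuples(index=False)]
    session.bulk_save_objects(rows)  # batches the INSERTs instead of one add per row
    session.commit()
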
"""
Chess state handling model.
"""
from concurrent.futures import ThreadPoolExecutor
from itertools import count
from json import dumps
from .base_board import BaseBoard, NoBoard
from .table_board import TableBoard
from .table_game import TableGame
__all__ = ['Board', 'NoBoard']
class Board(BaseBoard):
"""
Chess board interaction model.
"""
EMOJI = [
'⌛', '‼',
'♝', '♗', '♚', '♔', '♞', '♘', '♟', '♙', '♛', '♕', '♜', '♖', '▪', '▫']
def __init__(self, board=None, _id=None, active_player=True):
"""
Set up board.
"""
super().__init__(board, _id, active_player)
self.executor = ThreadPoolExecutor()
def __repr__(self):
"""
Output the raw view of board.
"""
        return f'Board({self.board!r})'
def __str__(self):
"""
Output the emoji view of board.
"""
if self._active_player:
def piece_to_index(piece):
return (piece & 0xF)
else:
def piece_to_index(piece):
return (piece & 0xE) | (0 if piece & 1 else 1)
return '\n'.join(map(
lambda posY, row: ''.join(map(
lambda posX, piece: self.EMOJI[
piece_to_index(piece)
if piece else
14 + ((posY + posX) % 2)],
count(), row)),
count(),
self.board if self._active_player else reversed(
[reversed(row) for row in self.board])))
def add_player_v1(self, dbsession, player):
"""
Player 2 joins game.
"""
assert player
if self.player1:
self.player2 = player
table_game = TableGame(
game=self.id,
player_one=self.player1,
player_two=self.player2,
one_won=True,
two_won=True)
table_board = TableBoard(
board_state=dumps(tuple(map(tuple, self.board))),
move_num=self._board.move_count,
player=self.active_player(),
game=self.id)
table_board.game_link.append(table_game)
dbsession.add(table_game)
dbsession.add(table_board)
self.poke_player(False)
return {}
self.player1 = player
return {}
def slice_cursor_v1(self, cursor=None, lookahead=1, complete=False):
"""
Retrieve REST cursor slice.
"""
return self.cursor_delegate.slice_cursor_v1(self._board, cursor, int(lookahead), complete)
def update_state_v1(self, dbsession, state):
"""
Make a move to a new state on the board.
"""
moving_player = self.active_player()
board = self.update(state)
table_game = dbsession.query(TableGame).filter(
TableGame.game == board.id).first()
table_board = TableBoard(
board_state=dumps(tuple(map(tuple, board.board))),
move_num=board._board.move_count,
player=board.active_player(),
game=board.id)
if table_game: # TODO(grandquista)
table_board.game_link.append(table_game)
dbsession.add(table_board)
if board:
board.poke_player(False)
return {'end': False}
board.poke_player(True, moving_player)
if board._board.has_kings():
table_game.one_won = False
table_game.two_won = False
elif moving_player == table_game.player_one:
table_game.two_won = False
else:
table_game.one_won = False
board.close()
return {'end': True}
|
normal
|
{
"blob_id": "796fada5dcd45ace8240760ac7e9bad41953ab56",
"index": 9347,
"step-1": "<mask token>\n\n\nclass Board(BaseBoard):\n <mask token>\n EMOJI = ['⌛', '‼', '♝', '♗', '♚', '♔', '♞', '♘', '♟', '♙', '♛', '♕',\n '♜', '♖', '▪', '▫']\n\n def __init__(self, board=None, _id=None, active_player=True):\n \"\"\"\n Set up board.\n \"\"\"\n super().__init__(board, _id, active_player)\n self.executor = ThreadPoolExecutor()\n\n def __repr__(self):\n \"\"\"\n Output the raw view of board.\n \"\"\"\n return f'Board({self.board!r})'\n\n def __str__(self):\n \"\"\"\n Output the emoji view of board.\n \"\"\"\n if self._active_player:\n\n def piece_to_index(piece):\n return piece & 15\n else:\n\n def piece_to_index(piece):\n return piece & 14 | (0 if piece & 1 else 1)\n return '\\n'.join(map(lambda posY, row: ''.join(map(lambda posX,\n piece: self.EMOJI[piece_to_index(piece) if piece else 14 + (\n posY + posX) % 2], count(), row)), count(), self.board if self.\n _active_player else reversed([reversed(row) for row in self.\n board])))\n\n def add_player_v1(self, dbsession, player):\n \"\"\"\n Player 2 joins game.\n \"\"\"\n assert player\n if self.player1:\n self.player2 = player\n table_game = TableGame(game=self.id, player_one=self.player1,\n player_two=self.player2, one_won=True, two_won=True)\n table_board = TableBoard(board_state=dumps(tuple(map(tuple,\n self.board))), move_num=self._board.move_count, player=self\n .active_player(), game=self.id)\n table_board.game_link.append(table_game)\n dbsession.add(table_game)\n dbsession.add(table_board)\n self.poke_player(False)\n return {}\n self.player1 = player\n return {}\n\n def slice_cursor_v1(self, cursor=None, lookahead=1, complete=False):\n \"\"\"\n Retrieve REST cursor slice.\n \"\"\"\n return self.cursor_delegate.slice_cursor_v1(self._board, cursor,\n int(lookahead), complete)\n\n def update_state_v1(self, dbsession, state):\n \"\"\"\n Make a move to a new state on the board.\n \"\"\"\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(TableGame.game ==\n board.id).first()\n table_board = TableBoard(board_state=dumps(tuple(map(tuple, board.\n board))), move_num=board._board.move_count, player=board.\n active_player(), game=board.id)\n if table_game:\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}\n",
"step-2": "<mask token>\n\n\nclass Board(BaseBoard):\n \"\"\"\n Chess board interaction model.\n \"\"\"\n EMOJI = ['⌛', '‼', '♝', '♗', '♚', '♔', '♞', '♘', '♟', '♙', '♛', '♕',\n '♜', '♖', '▪', '▫']\n\n def __init__(self, board=None, _id=None, active_player=True):\n \"\"\"\n Set up board.\n \"\"\"\n super().__init__(board, _id, active_player)\n self.executor = ThreadPoolExecutor()\n\n def __repr__(self):\n \"\"\"\n Output the raw view of board.\n \"\"\"\n return f'Board({self.board!r})'\n\n def __str__(self):\n \"\"\"\n Output the emoji view of board.\n \"\"\"\n if self._active_player:\n\n def piece_to_index(piece):\n return piece & 15\n else:\n\n def piece_to_index(piece):\n return piece & 14 | (0 if piece & 1 else 1)\n return '\\n'.join(map(lambda posY, row: ''.join(map(lambda posX,\n piece: self.EMOJI[piece_to_index(piece) if piece else 14 + (\n posY + posX) % 2], count(), row)), count(), self.board if self.\n _active_player else reversed([reversed(row) for row in self.\n board])))\n\n def add_player_v1(self, dbsession, player):\n \"\"\"\n Player 2 joins game.\n \"\"\"\n assert player\n if self.player1:\n self.player2 = player\n table_game = TableGame(game=self.id, player_one=self.player1,\n player_two=self.player2, one_won=True, two_won=True)\n table_board = TableBoard(board_state=dumps(tuple(map(tuple,\n self.board))), move_num=self._board.move_count, player=self\n .active_player(), game=self.id)\n table_board.game_link.append(table_game)\n dbsession.add(table_game)\n dbsession.add(table_board)\n self.poke_player(False)\n return {}\n self.player1 = player\n return {}\n\n def slice_cursor_v1(self, cursor=None, lookahead=1, complete=False):\n \"\"\"\n Retrieve REST cursor slice.\n \"\"\"\n return self.cursor_delegate.slice_cursor_v1(self._board, cursor,\n int(lookahead), complete)\n\n def update_state_v1(self, dbsession, state):\n \"\"\"\n Make a move to a new state on the board.\n \"\"\"\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(TableGame.game ==\n board.id).first()\n table_board = TableBoard(board_state=dumps(tuple(map(tuple, board.\n board))), move_num=board._board.move_count, player=board.\n active_player(), game=board.id)\n if table_game:\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}\n",
"step-3": "<mask token>\n__all__ = ['Board', 'NoBoard']\n\n\nclass Board(BaseBoard):\n \"\"\"\n Chess board interaction model.\n \"\"\"\n EMOJI = ['⌛', '‼', '♝', '♗', '♚', '♔', '♞', '♘', '♟', '♙', '♛', '♕',\n '♜', '♖', '▪', '▫']\n\n def __init__(self, board=None, _id=None, active_player=True):\n \"\"\"\n Set up board.\n \"\"\"\n super().__init__(board, _id, active_player)\n self.executor = ThreadPoolExecutor()\n\n def __repr__(self):\n \"\"\"\n Output the raw view of board.\n \"\"\"\n return f'Board({self.board!r})'\n\n def __str__(self):\n \"\"\"\n Output the emoji view of board.\n \"\"\"\n if self._active_player:\n\n def piece_to_index(piece):\n return piece & 15\n else:\n\n def piece_to_index(piece):\n return piece & 14 | (0 if piece & 1 else 1)\n return '\\n'.join(map(lambda posY, row: ''.join(map(lambda posX,\n piece: self.EMOJI[piece_to_index(piece) if piece else 14 + (\n posY + posX) % 2], count(), row)), count(), self.board if self.\n _active_player else reversed([reversed(row) for row in self.\n board])))\n\n def add_player_v1(self, dbsession, player):\n \"\"\"\n Player 2 joins game.\n \"\"\"\n assert player\n if self.player1:\n self.player2 = player\n table_game = TableGame(game=self.id, player_one=self.player1,\n player_two=self.player2, one_won=True, two_won=True)\n table_board = TableBoard(board_state=dumps(tuple(map(tuple,\n self.board))), move_num=self._board.move_count, player=self\n .active_player(), game=self.id)\n table_board.game_link.append(table_game)\n dbsession.add(table_game)\n dbsession.add(table_board)\n self.poke_player(False)\n return {}\n self.player1 = player\n return {}\n\n def slice_cursor_v1(self, cursor=None, lookahead=1, complete=False):\n \"\"\"\n Retrieve REST cursor slice.\n \"\"\"\n return self.cursor_delegate.slice_cursor_v1(self._board, cursor,\n int(lookahead), complete)\n\n def update_state_v1(self, dbsession, state):\n \"\"\"\n Make a move to a new state on the board.\n \"\"\"\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(TableGame.game ==\n board.id).first()\n table_board = TableBoard(board_state=dumps(tuple(map(tuple, board.\n board))), move_num=board._board.move_count, player=board.\n active_player(), game=board.id)\n if table_game:\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}\n",
"step-4": "<mask token>\nfrom concurrent.futures import ThreadPoolExecutor\nfrom itertools import count\nfrom json import dumps\nfrom .base_board import BaseBoard, NoBoard\nfrom .table_board import TableBoard\nfrom .table_game import TableGame\n__all__ = ['Board', 'NoBoard']\n\n\nclass Board(BaseBoard):\n \"\"\"\n Chess board interaction model.\n \"\"\"\n EMOJI = ['⌛', '‼', '♝', '♗', '♚', '♔', '♞', '♘', '♟', '♙', '♛', '♕',\n '♜', '♖', '▪', '▫']\n\n def __init__(self, board=None, _id=None, active_player=True):\n \"\"\"\n Set up board.\n \"\"\"\n super().__init__(board, _id, active_player)\n self.executor = ThreadPoolExecutor()\n\n def __repr__(self):\n \"\"\"\n Output the raw view of board.\n \"\"\"\n return f'Board({self.board!r})'\n\n def __str__(self):\n \"\"\"\n Output the emoji view of board.\n \"\"\"\n if self._active_player:\n\n def piece_to_index(piece):\n return piece & 15\n else:\n\n def piece_to_index(piece):\n return piece & 14 | (0 if piece & 1 else 1)\n return '\\n'.join(map(lambda posY, row: ''.join(map(lambda posX,\n piece: self.EMOJI[piece_to_index(piece) if piece else 14 + (\n posY + posX) % 2], count(), row)), count(), self.board if self.\n _active_player else reversed([reversed(row) for row in self.\n board])))\n\n def add_player_v1(self, dbsession, player):\n \"\"\"\n Player 2 joins game.\n \"\"\"\n assert player\n if self.player1:\n self.player2 = player\n table_game = TableGame(game=self.id, player_one=self.player1,\n player_two=self.player2, one_won=True, two_won=True)\n table_board = TableBoard(board_state=dumps(tuple(map(tuple,\n self.board))), move_num=self._board.move_count, player=self\n .active_player(), game=self.id)\n table_board.game_link.append(table_game)\n dbsession.add(table_game)\n dbsession.add(table_board)\n self.poke_player(False)\n return {}\n self.player1 = player\n return {}\n\n def slice_cursor_v1(self, cursor=None, lookahead=1, complete=False):\n \"\"\"\n Retrieve REST cursor slice.\n \"\"\"\n return self.cursor_delegate.slice_cursor_v1(self._board, cursor,\n int(lookahead), complete)\n\n def update_state_v1(self, dbsession, state):\n \"\"\"\n Make a move to a new state on the board.\n \"\"\"\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(TableGame.game ==\n board.id).first()\n table_board = TableBoard(board_state=dumps(tuple(map(tuple, board.\n board))), move_num=board._board.move_count, player=board.\n active_player(), game=board.id)\n if table_game:\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}\n",
"step-5": "\"\"\"\nChess state handling model.\n\"\"\"\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom itertools import count\nfrom json import dumps\n\nfrom .base_board import BaseBoard, NoBoard\nfrom .table_board import TableBoard\nfrom .table_game import TableGame\n\n__all__ = ['Board', 'NoBoard']\n\n\nclass Board(BaseBoard):\n \"\"\"\n Chess board interaction model.\n \"\"\"\n\n EMOJI = [\n '⌛', '‼',\n '♝', '♗', '♚', '♔', '♞', '♘', '♟', '♙', '♛', '♕', '♜', '♖', '▪', '▫']\n\n def __init__(self, board=None, _id=None, active_player=True):\n \"\"\"\n Set up board.\n \"\"\"\n super().__init__(board, _id, active_player)\n self.executor = ThreadPoolExecutor()\n\n def __repr__(self):\n \"\"\"\n Output the raw view of board.\n \"\"\"\n return f'Board({ self.board !r})'\n\n def __str__(self):\n \"\"\"\n Output the emoji view of board.\n \"\"\"\n if self._active_player:\n def piece_to_index(piece):\n return (piece & 0xF)\n else:\n def piece_to_index(piece):\n return (piece & 0xE) | (0 if piece & 1 else 1)\n\n return '\\n'.join(map(\n lambda posY, row: ''.join(map(\n lambda posX, piece: self.EMOJI[\n piece_to_index(piece)\n if piece else\n 14 + ((posY + posX) % 2)],\n count(), row)),\n count(),\n self.board if self._active_player else reversed(\n [reversed(row) for row in self.board])))\n\n def add_player_v1(self, dbsession, player):\n \"\"\"\n Player 2 joins game.\n \"\"\"\n assert player\n if self.player1:\n self.player2 = player\n table_game = TableGame(\n game=self.id,\n player_one=self.player1,\n player_two=self.player2,\n one_won=True,\n two_won=True)\n table_board = TableBoard(\n board_state=dumps(tuple(map(tuple, self.board))),\n move_num=self._board.move_count,\n player=self.active_player(),\n game=self.id)\n table_board.game_link.append(table_game)\n dbsession.add(table_game)\n dbsession.add(table_board)\n self.poke_player(False)\n return {}\n self.player1 = player\n return {}\n\n def slice_cursor_v1(self, cursor=None, lookahead=1, complete=False):\n \"\"\"\n Retrieve REST cursor slice.\n \"\"\"\n return self.cursor_delegate.slice_cursor_v1(self._board, cursor, int(lookahead), complete)\n\n def update_state_v1(self, dbsession, state):\n \"\"\"\n Make a move to a new state on the board.\n \"\"\"\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(\n TableGame.game == board.id).first()\n table_board = TableBoard(\n board_state=dumps(tuple(map(tuple, board.board))),\n move_num=board._board.move_count,\n player=board.active_player(),\n game=board.id)\n if table_game: # TODO(grandquista)\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
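
The two `piece_to_index` closures in `__str__` above differ only in how they treat the low bit: `piece & 0xF` keeps it, while `(piece & 0xE) | (0 if piece & 1 else 1)` flips it, so the inactive player sees the mirrored glyphs. A standalone sketch of the bit trick (the piece values here are illustrative assumptions, not BaseBoard's real encoding):

# Assumed layout: low bit = owning player, upper bits = piece type.
KNIGHT_MINE = 0b0110   # knight, low bit 0
KNIGHT_YOURS = 0b0111  # knight, low bit 1

def as_active(piece: int) -> int:
    return piece & 0xF                              # keep the owner bit

def as_inactive(piece: int) -> int:
    return (piece & 0xE) | (0 if piece & 1 else 1)  # flip the owner bit

assert as_inactive(KNIGHT_MINE) == KNIGHT_YOURS
assert as_inactive(KNIGHT_YOURS) == KNIGHT_MINE
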
from ccapi.interfaces.bitfinex import Bitfinex
from ccapi.interfaces.bittrex import Bittrex
from ccapi.interfaces.poloniex import Poloniex
from ccapi.interfaces.bithumb import Bithumb
from ccapi.interfaces.coinone import Coinone
from ccapi.interfaces.korbit import Korbit
# from ccapis.interfaces.coinbase import Coinbase
|
normal
|
{
"blob_id": "098c91f4aa367cb389e542c0199b633e7ecd4003",
"index": 4369,
"step-1": "<mask token>\n",
"step-2": "from ccapi.interfaces.bitfinex import Bitfinex\nfrom ccapi.interfaces.bittrex import Bittrex\nfrom ccapi.interfaces.poloniex import Poloniex\nfrom ccapi.interfaces.bithumb import Bithumb\nfrom ccapi.interfaces.coinone import Coinone\nfrom ccapi.interfaces.korbit import Korbit\n",
"step-3": "from ccapi.interfaces.bitfinex import Bitfinex\nfrom ccapi.interfaces.bittrex import Bittrex\nfrom ccapi.interfaces.poloniex import Poloniex\nfrom ccapi.interfaces.bithumb import Bithumb\nfrom ccapi.interfaces.coinone import Coinone\nfrom ccapi.interfaces.korbit import Korbit\n# from ccapis.interfaces.coinbase import Coinbase\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(negativo, positivo)
<|reserved_special_token_0|>
print(b_neg, b_pos)
<|reserved_special_token_1|>
positivo = float(1.0000001)
negativo = float(-1.000001)
print(negativo, positivo)
# bin() raises TypeError on a float; float.hex() shows the binary fraction instead
b_pos = positivo.hex()
b_neg = negativo.hex()
print(b_neg, b_pos)
|
flexible
|
{
"blob_id": "5c908697000247056bb63a443f837eef88b4c957",
"index": 9196,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(negativo, positivo)\n<mask token>\nprint(b_neg, b_pos)\n",
"step-3": "positivo = float(1.0000001)\nnegativo = float(-1.000001)\nprint(negativo, positivo)\nb_pos = bin(positivo)\nb_neg = bin(negativo)\nprint(b_neg, b_pos)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
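
`bin()` only accepts integers, so the snippet above uses `float.hex()` to inspect the value; if the raw IEEE-754 bit pattern is wanted instead, `struct` exposes it directly (a minimal sketch):

import struct

def float_bits(x: float) -> str:
    """64-bit IEEE-754 representation of x as a binary string."""
    (as_int,) = struct.unpack('>Q', struct.pack('>d', x))
    return format(as_int, '064b')

print(float_bits(1.0000001))
print(float_bits(-1.000001))
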
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .. import _utilities
import typing
from .authority import *
from .ca_pool import *
from .ca_pool_iam_binding import *
from .ca_pool_iam_member import *
from .ca_pool_iam_policy import *
from .certificate import *
from .certificate_template import *
from .certificate_template_iam_binding import *
from .certificate_template_iam_member import *
from .certificate_template_iam_policy import *
from .get_authority import *
from .get_ca_pool_iam_policy import *
from .get_certificate_template_iam_policy import *
from ._inputs import *
from . import outputs
<|reserved_special_token_1|>
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .authority import *
from .ca_pool import *
from .ca_pool_iam_binding import *
from .ca_pool_iam_member import *
from .ca_pool_iam_policy import *
from .certificate import *
from .certificate_template import *
from .certificate_template_iam_binding import *
from .certificate_template_iam_member import *
from .certificate_template_iam_policy import *
from .get_authority import *
from .get_ca_pool_iam_policy import *
from .get_certificate_template_iam_policy import *
from ._inputs import *
from . import outputs
|
flexible
|
{
"blob_id": "4ca4d4bd684802b056417be4ee3d7d10e8f5dc85",
"index": 8842,
"step-1": "<mask token>\n",
"step-2": "from .. import _utilities\nimport typing\nfrom .authority import *\nfrom .ca_pool import *\nfrom .ca_pool_iam_binding import *\nfrom .ca_pool_iam_member import *\nfrom .ca_pool_iam_policy import *\nfrom .certificate import *\nfrom .certificate_template import *\nfrom .certificate_template_iam_binding import *\nfrom .certificate_template_iam_member import *\nfrom .certificate_template_iam_policy import *\nfrom .get_authority import *\nfrom .get_ca_pool_iam_policy import *\nfrom .get_certificate_template_iam_policy import *\nfrom ._inputs import *\nfrom . import outputs\n",
"step-3": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nfrom .. import _utilities\nimport typing\n# Export this package's modules as members:\nfrom .authority import *\nfrom .ca_pool import *\nfrom .ca_pool_iam_binding import *\nfrom .ca_pool_iam_member import *\nfrom .ca_pool_iam_policy import *\nfrom .certificate import *\nfrom .certificate_template import *\nfrom .certificate_template_iam_binding import *\nfrom .certificate_template_iam_member import *\nfrom .certificate_template_iam_policy import *\nfrom .get_authority import *\nfrom .get_ca_pool_iam_policy import *\nfrom .get_certificate_template_iam_policy import *\nfrom ._inputs import *\nfrom . import outputs\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import IDS
# In[7]:
# testfile = 'data/good_fromE2.txt'
# testfile = 'data/goodqueries.txt'
good_testfile = "data/good_fromE2.txt"
bad_testfile = "data/badqueries.txt"
# a = IDS.LG()
a = IDS.SVM()
# preicdtlist = ['www.foo.com/id=1<script>alert(1)</script>','www.foo.com/name=admin\' or 1=1','abc.com/admin.php','"><svg onload=confirm(1)>','test/q=<a href="javascript:confirm(1)>','q=../etc/passwd']
# result =a.predict(preicdtlist)
# print('Normal results, first 10: ' + str(result[0][:10]))

with open(good_testfile, 'r') as f:
    print('Prediction dataset: ' + good_testfile)
    preicdtlist = [i.strip('\n') for i in f.readlines()]
    result = a.predict(preicdtlist)
    print('Malicious results, first 10: ' + str(result[1][:10]))
    print('Normal results, first 10: ' + str(result[0][:10]))

with open(bad_testfile, 'r') as f:
    print('Prediction dataset: ' + bad_testfile)
    preicdtlist = [i.strip('\n') for i in f.readlines()]
    result = a.predict(preicdtlist)
    print('Malicious results, first 10: ' + str(result[1][:10]))
    print('Normal results, first 10: ' + str(result[0][:10]))
|
normal
|
{
"blob_id": "e627bcc6c9a49d46190cc793a77103aa0a760989",
"index": 1709,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(good_testfile, 'r') as f:\n print('预测数据集: ' + good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: ' + bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n",
"step-3": "<mask token>\ngood_testfile = 'data/good_fromE2.txt'\nbad_testfile = 'data/badqueries.txt'\na = IDS.SVM()\nwith open(good_testfile, 'r') as f:\n print('预测数据集: ' + good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: ' + bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n",
"step-4": "import IDS\ngood_testfile = 'data/good_fromE2.txt'\nbad_testfile = 'data/badqueries.txt'\na = IDS.SVM()\nwith open(good_testfile, 'r') as f:\n print('预测数据集: ' + good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: ' + bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条' + str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n",
"step-5": "\nimport IDS\n# In[7]:\n# testfile = 'data/good_fromE2.txt'\n# testfile = 'data/goodqueries.txt'\ngood_testfile = \"data/good_fromE2.txt\"\nbad_testfile = \"data/badqueries.txt\"\n# a = IDS.LG()\n\na = IDS.SVM()\n\n# preicdtlist = ['www.foo.com/id=1<script>alert(1)</script>','www.foo.com/name=admin\\' or 1=1','abc.com/admin.php','\"><svg onload=confirm(1)>','test/q=<a href=\"javascript:confirm(1)>','q=../etc/passwd']\n# result =a.predict(preicdtlist)\n# print('正常结果 前10条 ' + str(result[0][:10]))\n\n\n\nwith open(good_testfile, 'r') as f:\n print('预测数据集: '+good_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条'+str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass\n\n\nwith open(bad_testfile, 'r') as f:\n print('预测数据集: '+bad_testfile)\n preicdtlist = [i.strip('\\n') for i in f.readlines()[:]]\n result = a.predict(preicdtlist)\n print('恶意结果 前10条'+str(result[1][:10]))\n print('正常结果 前10条 ' + str(result[0][:10]))\n pass",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
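
The prints above suggest `predict` returns a `(normal, malicious)` pair of query lists; if so, turning the two runs into rates is straightforward (a sketch based on that inferred return shape, not on IDS's documentation):

def flag_rate(model, queries):
    """Fraction of queries the model classifies as malicious."""
    normal, malicious = model.predict(queries)
    total = len(normal) + len(malicious)
    return len(malicious) / total if total else 0.0

# On the benign file this approximates the false-positive rate;
# on badqueries.txt it approximates recall.
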
import cv2
import os
"""
Video scene stitching
"""
stich_path="stichImage\\"
def read_video(filename):
'''
    Extract one frame per second from the video.
    :param filename: path to the video file
    :return: None; extracted frames are saved to disk and stitched via stitcher_image
'''
cap=cv2.VideoCapture(filename)
rate = cap.get(cv2.CAP_PROP_FPS)
count=0
success, frame = cap.read()
imageCount=0
while success:
success, frame = cap.read()
count+=1
if count>=rate:
if not os.path.exists(stich_path):
os.mkdir(stich_path)
(shotname, extension)=os.path.splitext(filename)
shotname=shotname.split('\\')[len(shotname.split('\\'))-1]
if not os.path.exists(stich_path+shotname):
os.mkdir(stich_path+shotname)
# frame=cv2.resize(frame,(960,544))
cv2.imencode(".jpg", frame)[1].tofile(
stich_path+shotname+'\\'+str(imageCount)+'.jpg')
imageCount+=1
count=0
stitcher_image(shotname)
def stitcher_image(shotname):
"""
    Stitch the extracted frames with OpenCV's Stitcher.
    **** Requires OpenCV 3.3.0 ****
    Stitcher does not run correctly on OpenCV versions below 3.3.0; see https://github.com/opencv/opencv/issues/6969#issuecomment-326430615
    :param shotname: name of the frame directory to stitch
"""
imgs=[]
for file in os.listdir(stich_path+shotname):
imgs.append(cv2.imread(stich_path+shotname+'\\'+file))
stitcher = cv2.createStitcher(False)
result = stitcher.stitch(imgs)
cv2.imwrite(stich_path+shotname+'\\'+"stich_result.jpg", result[1])
def read_file_list(path):
if os.path.isdir(path):
pathlist=os.listdir(path)
for file in pathlist:
read_video(path+'\\'+file)
# read_video('E:\\2.mp4')
|
normal
|
{
"blob_id": "a8506420b1bc558fa953f0cec3f8c16beaf44909",
"index": 9886,
"step-1": "<mask token>\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n",
"step-3": "<mask token>\nstich_path = 'stichImage\\\\'\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n",
"step-4": "import cv2\nimport os\n<mask token>\nstich_path = 'stichImage\\\\'\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n",
"step-5": "import cv2\nimport os\n\"\"\"\n视频场景拼接\n\"\"\"\nstich_path=\"stichImage\\\\\"\n\ndef read_video(filename):\n '''\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n '''\n cap=cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count=0\n success, frame = cap.read()\n imageCount=0\n while success:\n success, frame = cap.read()\n count+=1\n if count>=rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n (shotname, extension)=os.path.splitext(filename)\n shotname=shotname.split('\\\\')[len(shotname.split('\\\\'))-1]\n if not os.path.exists(stich_path+shotname):\n os.mkdir(stich_path+shotname)\n # frame=cv2.resize(frame,(960,544))\n cv2.imencode(\".jpg\", frame)[1].tofile(\n stich_path+shotname+'\\\\'+str(imageCount)+'.jpg')\n imageCount+=1\n count=0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs=[]\n for file in os.listdir(stich_path+shotname):\n imgs.append(cv2.imread(stich_path+shotname+'\\\\'+file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path+shotname+'\\\\'+\"stich_result.jpg\", result[1])\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist=os.listdir(path)\n for file in pathlist:\n read_video(path+'\\\\'+file)\n\n\n\n# read_video('E:\\\\2.mp4')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def loadData(filename):
_data = json.loads(open(filename).read())
return _data
def buildUserDict(userDict, _data, boardName):
for article in _data:
_user = article['b_作者'].split(' ')[0]
if not _user in userDict:
userDict[_user] = dict()
if not boardName in userDict[_user]:
userDict[_user][boardName] = {'article': 0, 'article_g': 0,
'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
userDict[_user][boardName]['article'] += 1
userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']
userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']
userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']
responses = article['g_推文']
for res in responses:
resUser = responses[res]['留言者']
if not resUser in userDict:
userDict[resUser] = dict()
if not boardName in userDict[resUser]:
userDict[resUser][boardName] = {'article': 0, 'article_g':
0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
if responses[res]['狀態'] == u'噓 ':
userDict[resUser][boardName]['b'] += 1
elif responses[res]['狀態'] == u'推 ':
userDict[resUser][boardName]['g'] += 1
else:
userDict[resUser][boardName]['n'] += 1
return userDict
def printFeature2File(userDict, filename):
_file = open(filename, 'w')
json.dump(userDict, _file)
_file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadData(filename):
_data = json.loads(open(filename).read())
return _data
def buildUserDict(userDict, _data, boardName):
for article in _data:
_user = article['b_作者'].split(' ')[0]
if not _user in userDict:
userDict[_user] = dict()
if not boardName in userDict[_user]:
userDict[_user][boardName] = {'article': 0, 'article_g': 0,
'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
userDict[_user][boardName]['article'] += 1
userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']
userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']
userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']
responses = article['g_推文']
for res in responses:
resUser = responses[res]['留言者']
if not resUser in userDict:
userDict[resUser] = dict()
if not boardName in userDict[resUser]:
userDict[resUser][boardName] = {'article': 0, 'article_g':
0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
if responses[res]['狀態'] == u'噓 ':
userDict[resUser][boardName]['b'] += 1
elif responses[res]['狀態'] == u'推 ':
userDict[resUser][boardName]['g'] += 1
else:
userDict[resUser][boardName]['n'] += 1
return userDict
def printFeature2File(userDict, filename):
_file = open(filename, 'w')
json.dump(userDict, _file)
_file.close()
if __name__ == '__main__':
featureFileOut = str(sys.argv[1])
dataDir = '../data/'
filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',
'data-Elephants-3500-2017-06-29-03-30-22.json',
'data-Monkeys-3500-2017-06-29-03-31-55.json',
'data-Guardians-3500-2017-06-29-04-12-43.json',
'data-Lions-3300-2017-06-29-04-11-50.json']
total_start = time.time()
_start = time.time()
userDict = dict()
for index in range(len(filenameList)):
print('Loading data from ' + boardNameList[index] + ' ...')
_data = loadData(dataDir + filenameList[index])
print('number of articles : ' + str(len(_data)))
print('Cost time : ' + str(time.time() - _start) + ' secs')
_start = time.time()
print('Building user dict...')
boardName = boardNameList[index]
userDict = buildUserDict(userDict, _data, boardName)
print('Total user number : ' + str(len(userDict.keys())))
print('Cost time : ' + str(time.time() - _start) + ' secs')
_start = time.time()
print('Extract user features...')
printFeature2File(userDict, featureFileOut)
print('Cost time : ' + str(time.time() - _start) + ' secs')
print('Total cost time : ' + str(time.time() - total_start) + ' secs')
_start = time.time()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
boardNameList = ['Baseball', 'Elephants', 'Monkeys', 'Lions', 'Guardians']
def loadData(filename):
_data = json.loads(open(filename).read())
return _data
def buildUserDict(userDict, _data, boardName):
for article in _data:
_user = article['b_作者'].split(' ')[0]
if not _user in userDict:
userDict[_user] = dict()
if not boardName in userDict[_user]:
userDict[_user][boardName] = {'article': 0, 'article_g': 0,
'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
userDict[_user][boardName]['article'] += 1
userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']
userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']
userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']
responses = article['g_推文']
for res in responses:
resUser = responses[res]['留言者']
if not resUser in userDict:
userDict[resUser] = dict()
if not boardName in userDict[resUser]:
userDict[resUser][boardName] = {'article': 0, 'article_g':
0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
if responses[res]['狀態'] == u'噓 ':
userDict[resUser][boardName]['b'] += 1
elif responses[res]['狀態'] == u'推 ':
userDict[resUser][boardName]['g'] += 1
else:
userDict[resUser][boardName]['n'] += 1
return userDict
def printFeature2File(userDict, filename):
_file = open(filename, 'w')
json.dump(userDict, _file)
_file.close()
if __name__ == '__main__':
featureFileOut = str(sys.argv[1])
dataDir = '../data/'
filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',
'data-Elephants-3500-2017-06-29-03-30-22.json',
'data-Monkeys-3500-2017-06-29-03-31-55.json',
'data-Guardians-3500-2017-06-29-04-12-43.json',
'data-Lions-3300-2017-06-29-04-11-50.json']
total_start = time.time()
_start = time.time()
userDict = dict()
for index in range(len(filenameList)):
print('Loading data from ' + boardNameList[index] + ' ...')
_data = loadData(dataDir + filenameList[index])
print('number of articles : ' + str(len(_data)))
print('Cost time : ' + str(time.time() - _start) + ' secs')
_start = time.time()
print('Building user dict...')
boardName = boardNameList[index]
userDict = buildUserDict(userDict, _data, boardName)
print('Total user number : ' + str(len(userDict.keys())))
print('Cost time : ' + str(time.time() - _start) + ' secs')
_start = time.time()
print('Extract user features...')
printFeature2File(userDict, featureFileOut)
print('Cost time : ' + str(time.time() - _start) + ' secs')
print('Total cost time : ' + str(time.time() - total_start) + ' secs')
_start = time.time()
<|reserved_special_token_1|>
import json
import sys
import time
boardNameList = ['Baseball', 'Elephants', 'Monkeys', 'Lions', 'Guardians']
def loadData(filename):
_data = json.loads(open(filename).read())
return _data
def buildUserDict(userDict, _data, boardName):
for article in _data:
_user = article['b_作者'].split(' ')[0]
if not _user in userDict:
userDict[_user] = dict()
if not boardName in userDict[_user]:
userDict[_user][boardName] = {'article': 0, 'article_g': 0,
'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
userDict[_user][boardName]['article'] += 1
userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']
userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']
userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']
responses = article['g_推文']
for res in responses:
resUser = responses[res]['留言者']
if not resUser in userDict:
userDict[resUser] = dict()
if not boardName in userDict[resUser]:
userDict[resUser][boardName] = {'article': 0, 'article_g':
0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}
if responses[res]['狀態'] == u'噓 ':
userDict[resUser][boardName]['b'] += 1
elif responses[res]['狀態'] == u'推 ':
userDict[resUser][boardName]['g'] += 1
else:
userDict[resUser][boardName]['n'] += 1
return userDict
def printFeature2File(userDict, filename):
_file = open(filename, 'w')
json.dump(userDict, _file)
_file.close()
if __name__ == '__main__':
featureFileOut = str(sys.argv[1])
dataDir = '../data/'
filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',
'data-Elephants-3500-2017-06-29-03-30-22.json',
'data-Monkeys-3500-2017-06-29-03-31-55.json',
'data-Guardians-3500-2017-06-29-04-12-43.json',
'data-Lions-3300-2017-06-29-04-11-50.json']
total_start = time.time()
_start = time.time()
userDict = dict()
for index in range(len(filenameList)):
print('Loading data from ' + boardNameList[index] + ' ...')
_data = loadData(dataDir + filenameList[index])
print('number of articles : ' + str(len(_data)))
print('Cost time : ' + str(time.time() - _start) + ' secs')
_start = time.time()
print('Building user dict...')
boardName = boardNameList[index]
userDict = buildUserDict(userDict, _data, boardName)
print('Total user number : ' + str(len(userDict.keys())))
print('Cost time : ' + str(time.time() - _start) + ' secs')
_start = time.time()
print('Extract user features...')
printFeature2File(userDict, featureFileOut)
print('Cost time : ' + str(time.time() - _start) + ' secs')
print('Total cost time : ' + str(time.time() - total_start) + ' secs')
_start = time.time()
<|reserved_special_token_1|>
import json
import sys
import time
# boardName pageNum indexNewest
# Baseball 5000 5183
# Elephants 3500 3558
# Monkeys 3500 3672
# Lions 3300 3381
# Guardians 3500 3542
boardNameList = ["Baseball", "Elephants", "Monkeys", "Lions", "Guardians"]
def loadData(filename):
_data = json.loads(open(filename).read())
return _data
def buildUserDict(userDict, _data, boardName):
	# posts per board	total pushes received on posts	total boos received on posts	total neutral ("->") replies received	pushes given per board	boos given per board	neutral replies given per board
#article article_g article_b article_n g b n
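	# A sample entry (hypothetical user and counts) ends up shaped like:
	# userDict['someUser']['Baseball'] = {'article': 2, 'article_g': 87,
	#	'article_b': 5, 'article_n': 30, 'g': 14, 'b': 3, 'n': 7}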
# userDict = dict()
for article in _data:
_user = article['b_作者'].split(" ")[0]
if not _user in userDict:
userDict[_user] = dict()
if not boardName in userDict[_user]:
userDict[_user][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0}
userDict[_user][boardName]['article'] += 1
userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']
userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']
userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']
responses = article['g_推文']
for res in responses:
resUser = responses[res]['留言者']
if not resUser in userDict:
userDict[resUser] = dict()
if not boardName in userDict[resUser]:
userDict[resUser][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0}
if responses[res]['狀態'] == u'噓 ':
userDict[resUser][boardName]['b'] += 1
elif responses[res]['狀態'] == u'推 ':
userDict[resUser][boardName]['g'] += 1
else:
userDict[resUser][boardName]['n'] += 1
return userDict
def printFeature2File(userDict, filename):
_file = open(filename, "w")
json.dump(userDict,_file)
_file.close()
if __name__ == "__main__":
# filename = str(sys.argv[1])
featureFileOut = str(sys.argv[1])
dataDir = "../data/"
filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json','data-Elephants-3500-2017-06-29-03-30-22.json',
'data-Monkeys-3500-2017-06-29-03-31-55.json','data-Guardians-3500-2017-06-29-04-12-43.json',
'data-Lions-3300-2017-06-29-04-11-50.json']
#python3 extractFeatures.py ../data/userFeatureTest.json
total_start = time.time()
_start = time.time()
userDict = dict()
for index in range(len(filenameList)):
print("Loading data from "+boardNameList[index]+" ...")
_data = loadData(dataDir+filenameList[index])
print("number of articles : "+str(len(_data)))
print("Cost time : "+str(time.time()-_start)+" secs")
_start = time.time()
print("Building user dict...")
boardName = boardNameList[index]
userDict = buildUserDict(userDict, _data, boardName)
print("Total user number : "+str(len(userDict.keys())))
print("Cost time : "+str(time.time()-_start)+" secs")
_start = time.time()
print("Extract user features...")
printFeature2File(userDict, featureFileOut)
print("Cost time : "+str(time.time()-_start)+" secs")
print("Total cost time : "+str(time.time()-total_start)+" secs")
_start = time.time()
# for dd in _data:
# print("=====================================")
# print(dd['b_作者'].split(" ")[0])
# print(dd['h_推文總數']['b'])
# print(dd['h_推文總數']['g'])
# print(dd['h_推文總數']['all'])
# res = dd['g_推文']
# goodResList = list()
# BooResList = list()
# neutralResList = list()
# for rr in res:
# if res[rr]['狀態'] == u'噓 ':
# BooResList.append(res[rr]['留言者'])
# elif res[rr]['狀態'] == u'推 ':
# goodResList.append(res[rr]['留言者'])
# else:
# neutralResList.append(res[rr]['留言者'])
# print("噓"+str(BooResList))
# print("推"+str(goodResList))
# print("->"+str(neutralResList))
# print(_data[0]['c_標題'])
# print(_data[0]['h_推文總數'])
# print(_data[0]['g_推文'])
|
flexible
|
{
"blob_id": "306240db8a1652fe7cd79808c40e4354c3158d3e",
"index": 3434,
"step-1": "<mask token>\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\nif __name__ == '__main__':\n featureFileOut = str(sys.argv[1])\n dataDir = '../data/'\n filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',\n 'data-Elephants-3500-2017-06-29-03-30-22.json',\n 'data-Monkeys-3500-2017-06-29-03-31-55.json',\n 'data-Guardians-3500-2017-06-29-04-12-43.json',\n 'data-Lions-3300-2017-06-29-04-11-50.json']\n total_start = time.time()\n _start = time.time()\n userDict = dict()\n for index in range(len(filenameList)):\n print('Loading data from ' + boardNameList[index] + ' ...')\n _data = loadData(dataDir + filenameList[index])\n print('number of articles : ' + str(len(_data)))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Building user dict...')\n boardName = boardNameList[index]\n userDict = buildUserDict(userDict, _data, boardName)\n print('Total user number : ' + str(len(userDict.keys())))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Extract user features...')\n printFeature2File(userDict, featureFileOut)\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n print('Total cost time : ' + str(time.time() - total_start) + ' secs')\n _start = time.time()\n",
"step-3": "<mask token>\nboardNameList = ['Baseball', 'Elephants', 'Monkeys', 'Lions', 'Guardians']\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\nif __name__ == '__main__':\n featureFileOut = str(sys.argv[1])\n dataDir = '../data/'\n filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',\n 'data-Elephants-3500-2017-06-29-03-30-22.json',\n 'data-Monkeys-3500-2017-06-29-03-31-55.json',\n 'data-Guardians-3500-2017-06-29-04-12-43.json',\n 'data-Lions-3300-2017-06-29-04-11-50.json']\n total_start = time.time()\n _start = time.time()\n userDict = dict()\n for index in range(len(filenameList)):\n print('Loading data from ' + boardNameList[index] + ' ...')\n _data = loadData(dataDir + filenameList[index])\n print('number of articles : ' + str(len(_data)))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Building user dict...')\n boardName = boardNameList[index]\n userDict = buildUserDict(userDict, _data, boardName)\n print('Total user number : ' + str(len(userDict.keys())))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Extract user features...')\n printFeature2File(userDict, featureFileOut)\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n print('Total cost time : ' + str(time.time() - total_start) + ' secs')\n _start = time.time()\n",
"step-4": "import json\nimport sys\nimport time\nboardNameList = ['Baseball', 'Elephants', 'Monkeys', 'Lions', 'Guardians']\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\nif __name__ == '__main__':\n featureFileOut = str(sys.argv[1])\n dataDir = '../data/'\n filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',\n 'data-Elephants-3500-2017-06-29-03-30-22.json',\n 'data-Monkeys-3500-2017-06-29-03-31-55.json',\n 'data-Guardians-3500-2017-06-29-04-12-43.json',\n 'data-Lions-3300-2017-06-29-04-11-50.json']\n total_start = time.time()\n _start = time.time()\n userDict = dict()\n for index in range(len(filenameList)):\n print('Loading data from ' + boardNameList[index] + ' ...')\n _data = loadData(dataDir + filenameList[index])\n print('number of articles : ' + str(len(_data)))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Building user dict...')\n boardName = boardNameList[index]\n userDict = buildUserDict(userDict, _data, boardName)\n print('Total user number : ' + str(len(userDict.keys())))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Extract user features...')\n printFeature2File(userDict, featureFileOut)\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n print('Total cost time : ' + str(time.time() - total_start) + ' secs')\n _start = time.time()\n",
"step-5": "import json\nimport sys\nimport time\n# boardName\tpageNum\tindexNewest\n# Baseball\t5000\t5183\n# Elephants\t3500\t3558\n# Monkeys\t3500\t3672\n# Lions\t3300\t3381\n# Guardians\t3500\t3542\n\nboardNameList = [\"Baseball\", \"Elephants\", \"Monkeys\", \"Lions\", \"Guardians\"]\ndef loadData(filename):\n\t_data = json.loads(open(filename).read())\n\treturn _data\n\ndef buildUserDict(userDict, _data, boardName):\n\t#各版發文數\t發文總推數\t發文總噓數\t發文總->數\t各版推文數\t各板噓文數\t各版->數\n\t#article\tarticle_g\tarticle_b\tarticle_n\tg \t\t\tb \t\t\tn \t\t\t\n\t# userDict = dict()\n\tfor article in _data:\n\t\t_user = article['b_作者'].split(\" \")[0] \n\t\tif not _user in userDict:\n\t\t\tuserDict[_user] = dict()\n\t\tif not boardName in userDict[_user]:\n\t\t\tuserDict[_user][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0}\n\t\t\n\t\tuserDict[_user][boardName]['article'] += 1\n\t\tuserDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n\t\tuserDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n\t\tuserDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n\t\tresponses = article['g_推文']\n\t\tfor res in responses:\n\t\t\tresUser = responses[res]['留言者']\n\t\t\tif not resUser in userDict:\n\t\t\t\tuserDict[resUser] = dict()\n\t\t\tif not boardName in userDict[resUser]:\n\t\t\t\tuserDict[resUser][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0}\n\n\t\t\tif responses[res]['狀態'] == u'噓 ':\n\t\t\t\tuserDict[resUser][boardName]['b'] += 1\n\t\t\telif responses[res]['狀態'] == u'推 ':\n\t\t\t\tuserDict[resUser][boardName]['g'] += 1\n\t\t\telse:\n\t\t\t\tuserDict[resUser][boardName]['n'] += 1\n\treturn userDict\ndef printFeature2File(userDict, filename):\n\t_file = open(filename, \"w\")\n\tjson.dump(userDict,_file)\n\t_file.close()\n\nif __name__ == \"__main__\": \n\t# filename = str(sys.argv[1])\n\tfeatureFileOut = str(sys.argv[1])\n\tdataDir = \"../data/\"\n\tfilenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json','data-Elephants-3500-2017-06-29-03-30-22.json',\n\t\t\t\t\t'data-Monkeys-3500-2017-06-29-03-31-55.json','data-Guardians-3500-2017-06-29-04-12-43.json',\n\t\t\t\t\t'data-Lions-3300-2017-06-29-04-11-50.json']\n\t#python3 extractFeatures.py ../data/userFeatureTest.json\n\ttotal_start = time.time()\n\t_start = time.time()\n\tuserDict = dict()\n\tfor index in range(len(filenameList)):\n\t\tprint(\"Loading data from \"+boardNameList[index]+\" ...\")\n\t\t_data = loadData(dataDir+filenameList[index])\n\t\tprint(\"number of articles : \"+str(len(_data)))\n\t\tprint(\"Cost time : \"+str(time.time()-_start)+\" secs\")\n\t\t_start = time.time()\n\n\t\tprint(\"Building user dict...\")\n\t\tboardName = boardNameList[index]\n\t\tuserDict = buildUserDict(userDict, _data, boardName)\n\t\tprint(\"Total user number : \"+str(len(userDict.keys())))\n\t\tprint(\"Cost time : \"+str(time.time()-_start)+\" secs\")\n\t\t_start = time.time()\n\n\tprint(\"Extract user features...\")\n\tprintFeature2File(userDict, featureFileOut)\n\tprint(\"Cost time : \"+str(time.time()-_start)+\" secs\")\n\tprint(\"Total cost time : \"+str(time.time()-total_start)+\" secs\")\n\t_start = time.time()\n\t\n\t# for dd in _data:\n\t# \tprint(\"=====================================\")\n\t# \tprint(dd['b_作者'].split(\" \")[0])\n\t# \tprint(dd['h_推文總數']['b'])\n\t# \tprint(dd['h_推文總數']['g'])\n\t# \tprint(dd['h_推文總數']['all'])\n\t# \tres = dd['g_推文']\n\t# \tgoodResList = list()\n\t# \tBooResList = list()\n\t# \tneutralResList = list()\n\t# \tfor rr 
in res:\n\t# \t\tif res[rr]['狀態'] == u'噓 ':\n\t# \t\t\tBooResList.append(res[rr]['留言者'])\n\t# \t\telif res[rr]['狀態'] == u'推 ':\n\t# \t\t\tgoodResList.append(res[rr]['留言者'])\n\t# \t\telse:\n\t# \t\t\tneutralResList.append(res[rr]['留言者'])\n\t# \tprint(\"噓\"+str(BooResList))\n\t# \tprint(\"推\"+str(goodResList))\n\t# \tprint(\"->\"+str(neutralResList))\n\t# print(_data[0]['c_標題'])\n\t# print(_data[0]['h_推文總數'])\n\t# print(_data[0]['g_推文'])",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Competition scoring
# [Problem] In a programming contest, 10 judges score each contestant from 0 to 100.
# The final score is the average of the remaining 8 scores after dropping one highest and one lowest score. Write a program to implement this.
sc_lst = []
i = 1
while len(sc_lst) < 10:
try:
sc = int(input('请第%d位评委打分:' % i))
if sc > 0 and sc < 101:
sc_lst.append(sc)
i += 1
else:
print('超出范围,输入无效')
except:
print('请输入1-100以内的数字')
max_sc = max(sc_lst)
min_sc = min(sc_lst)
sc_lst.remove(max_sc)
sc_lst.remove(min_sc)
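# Note: list.remove() drops only the first occurrence, so even with tied scores
# exactly one highest and one lowest value are discarded, as the problem requires.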
ave_sc = sum(sc_lst) / len(sc_lst)
print('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))
print('end')
|
normal
|
{
"blob_id": "a17abd3947a946daf2c453c120f2e79d2ba60778",
"index": 901,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n<mask token>\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\n<mask token>\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-3": "sc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-4": "# 赛场统分\n# 【问题】在编程竞赛中,有10个评委为参赛的选手打分,分数为0 ~ 100分。\n# 选手最后得分为:去掉一个最高分和一个最低分后其余8个分数的平均值。请编写一个程序实现。\n\nsc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
class Config(object):
"""Base Config Object"""
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'
UPLOAD_FOLDER = './uploads'
dbconfig = {
    'host': os.environ.get('MYSQL_HOST') or 'localhost',
    'user': os.environ.get('MYSQL_USER') or 'root',
    'password': os.environ.get('MYSQL_PASSWORD') or '',
    'db': os.environ.get('MYSQL_DB') or 'finalproject2.sql'
}
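# Sketch of intended use (assuming a PyMySQL-style driver, not confirmed by this code):
#   import pymysql; conn = pymysql.connect(**dbconfig)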
class DevelopmentConfig(Config):
"""Development Config that extends the Base Config Object"""
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(Config):
"""Production Config that extends the Base Config Object"""
DEBUG = False
|
normal
|
{
"blob_id": "833923c1928862e13c24904f5614927a683b168f",
"index": 611,
"step-1": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-2": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n \"\"\"Development Config that extends the Base Config Object\"\"\"\n DEVELOPMENT = True\n DEBUG = True\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n \"\"\"Base Config Object\"\"\"\n DEBUG = False\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'\n UPLOAD_FOLDER = './uploads'\n\n\n<mask token>\n\n\nclass DevelopmentConfig(Config):\n \"\"\"Development Config that extends the Base Config Object\"\"\"\n DEVELOPMENT = True\n DEBUG = True\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-4": "import os\n\n\nclass Config(object):\n \"\"\"Base Config Object\"\"\"\n DEBUG = False\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'\n UPLOAD_FOLDER = './uploads'\n\n\ndbconfig = {'host': os.environ.get('MYSQL_HOST') or 'localhost', 'user': os\n .environ.get('MYSQL_USER') or 'root', 'password': os.environ.get(\n 'MYSQL_PASSWORD') or '', 'db': os.environ.get('MYSQL_DB') or\n 'finalproject2.sql'}\n\n\nclass DevelopmentConfig(Config):\n \"\"\"Development Config that extends the Base Config Object\"\"\"\n DEVELOPMENT = True\n DEBUG = True\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-5": null,
"step-ids": [
4,
6,
9,
11
]
}
|
[
4,
6,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Mood(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Mood(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def decision(self, data):
temp = float(data)
if temp <= 10:
return self.LOW_TEMP
if temp > 30:
return self.HIGH_TEMP
if 10 < temp <= 30:
return self.GENERIC
<|reserved_special_token_1|>
class Mood(object):
GENERIC = 1
HIGH_TEMP = 2
LOW_TEMP = 3
HIGH_DUST = 4
LOW_DUST = 5
def decision(self, data):
temp = float(data)
if temp <= 10:
return self.LOW_TEMP
if temp > 30:
return self.HIGH_TEMP
if 10 < temp <= 30:
return self.GENERIC
<|reserved_special_token_1|>
class Mood(object):
GENERIC = 1
HIGH_TEMP = 2
LOW_TEMP = 3
HIGH_DUST = 4
LOW_DUST = 5
def decision(self, data):
temp = float(data)
if temp <= 10:
return self.LOW_TEMP
if temp > 30:
return self.HIGH_TEMP
        if 10 < temp <= 30:
return self.GENERIC
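
# Hypothetical usage of the thresholds above: Mood().decision('32.5') returns
# Mood.HIGH_TEMP, Mood().decision('4') returns Mood.LOW_TEMP, and any reading
# in (10, 30] falls through to Mood.GENERIC.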
|
flexible
|
{
"blob_id": "511016b9cd54f6824360d609ede233b9cc3e4447",
"index": 7564,
"step-1": "<mask token>\n",
"step-2": "class Mood(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class Mood(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def decision(self, data):\n temp = float(data)\n if temp <= 10:\n return self.LOW_TEMP\n if temp > 30:\n return self.HIGH_TEMP\n if 10 < temp <= 30:\n return self.GENERIC\n",
"step-4": "class Mood(object):\n GENERIC = 1\n HIGH_TEMP = 2\n LOW_TEMP = 3\n HIGH_DUST = 4\n LOW_DUST = 5\n\n def decision(self, data):\n temp = float(data)\n if temp <= 10:\n return self.LOW_TEMP\n if temp > 30:\n return self.HIGH_TEMP\n if 10 < temp <= 30:\n return self.GENERIC\n",
"step-5": "class Mood(object):\n\n GENERIC = 1\n HIGH_TEMP = 2\n LOW_TEMP = 3\n HIGH_DUST = 4\n LOW_DUST = 5\n\n def decision(self, data):\n temp = float(data)\n\n if temp <= 10:\n return self.LOW_TEMP\n\n if temp > 30:\n return self.HIGH_TEMP\n\n if (10 < temp <=30):\n return self.GENERIC\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import statistics
import csv
df = pd.read_csv("height-weight.csv")
heightlist = df["Height(Inches)"].to_list()
weightlist = df["Weight(Pounds)"].to_list()
heightmean = statistics.mean(heightlist)
heightmedian = statistics.median(heightlist)
heightmode = statistics.mode(heightlist)
heightstdev = statistics.stdev(heightlist)
print(heightmean)
print(heightmedian)
print(heightmode)
print(heightstdev)
firststart = heightmean - heightstdev
firstend = heightmean + heightstdev
secondstart = heightmean - 2*heightstdev
secondend = heightmean + 2*heightstdev
thirdstart = heightmean - 3*heightstdev
thirdend = heightmean + 3*heightstdev
first = [result for result in heightlist if result > firststart and result < firstend]
second = [result for result in heightlist if result > secondstart and result < secondend]
third = [result for result in heightlist if result > thirdstart and result < thirdend]
firstpercentage = len(first)* 100 / len(heightlist)
secondpercentage = len(second)* 100 / len(heightlist)
thirdpercentage = len(third)* 100 / len(heightlist)
print(firstpercentage)
print(secondpercentage)
print(thirdpercentage)
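
# Sanity check: statistics.stdev is the sample standard deviation, and for roughly
# normal data the three percentages above should land near the empirical rule's
# 68% / 95% / 99.7% for 1, 2 and 3 standard deviations.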
|
normal
|
{
"blob_id": "3f4b05a1d0c4c2a2b085a0265bafbf89b5635e31",
"index": 8021,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(heightmean)\nprint(heightmedian)\nprint(heightmode)\nprint(heightstdev)\n<mask token>\nprint(firstpercentage)\nprint(secondpercentage)\nprint(thirdpercentage)\n",
"step-3": "<mask token>\ndf = pd.read_csv('height-weight.csv')\nheightlist = df['Height(Inches)'].to_list()\nweightlist = df['Weight(Pounds)'].to_list()\nheightmean = statistics.mean(heightlist)\nheightmedian = statistics.median(heightlist)\nheightmode = statistics.mode(heightlist)\nheightstdev = statistics.stdev(heightlist)\nprint(heightmean)\nprint(heightmedian)\nprint(heightmode)\nprint(heightstdev)\nfirststart = heightmean - heightstdev\nfirstend = heightmean + heightstdev\nsecondstart = heightmean - 2 * heightstdev\nsecondend = heightmean + 2 * heightstdev\nthirdstart = heightmean - 3 * heightstdev\nthirdend = heightmean + 3 * heightstdev\nfirst = [result for result in heightlist if result > firststart and result <\n firstend]\nsecond = [result for result in heightlist if result > secondstart and \n result < secondend]\nthird = [result for result in heightlist if result > thirdstart and result <\n thirdend]\nfirstpercentage = len(first) * 100 / len(heightlist)\nsecondpercentage = len(second) * 100 / len(heightlist)\nthirdpercentage = len(third) * 100 / len(heightlist)\nprint(firstpercentage)\nprint(secondpercentage)\nprint(thirdpercentage)\n",
"step-4": "import pandas as pd\nimport statistics\nimport csv\ndf = pd.read_csv('height-weight.csv')\nheightlist = df['Height(Inches)'].to_list()\nweightlist = df['Weight(Pounds)'].to_list()\nheightmean = statistics.mean(heightlist)\nheightmedian = statistics.median(heightlist)\nheightmode = statistics.mode(heightlist)\nheightstdev = statistics.stdev(heightlist)\nprint(heightmean)\nprint(heightmedian)\nprint(heightmode)\nprint(heightstdev)\nfirststart = heightmean - heightstdev\nfirstend = heightmean + heightstdev\nsecondstart = heightmean - 2 * heightstdev\nsecondend = heightmean + 2 * heightstdev\nthirdstart = heightmean - 3 * heightstdev\nthirdend = heightmean + 3 * heightstdev\nfirst = [result for result in heightlist if result > firststart and result <\n firstend]\nsecond = [result for result in heightlist if result > secondstart and \n result < secondend]\nthird = [result for result in heightlist if result > thirdstart and result <\n thirdend]\nfirstpercentage = len(first) * 100 / len(heightlist)\nsecondpercentage = len(second) * 100 / len(heightlist)\nthirdpercentage = len(third) * 100 / len(heightlist)\nprint(firstpercentage)\nprint(secondpercentage)\nprint(thirdpercentage)\n",
"step-5": "import pandas as pd\r\nimport statistics\r\nimport csv\r\n\r\ndf = pd.read_csv(\"height-weight.csv\")\r\nheightlist = df[\"Height(Inches)\"].to_list()\r\nweightlist = df[\"Weight(Pounds)\"].to_list()\r\n\r\nheightmean = statistics.mean(heightlist)\r\nheightmedian = statistics.median(heightlist)\r\nheightmode = statistics.mode(heightlist)\r\nheightstdev = statistics.stdev(heightlist)\r\n\r\nprint(heightmean)\r\nprint(heightmedian)\r\nprint(heightmode)\r\nprint(heightstdev)\r\n\r\nfirststart = heightmean - heightstdev\r\nfirstend = heightmean + heightstdev\r\n\r\nsecondstart = heightmean - 2*heightstdev\r\nsecondend = heightmean + 2*heightstdev\r\n\r\nthirdstart = heightmean - 3*heightstdev\r\nthirdend = heightmean + 3*heightstdev\r\n\r\nfirst = [result for result in heightlist if result > firststart and result < firstend]\r\nsecond = [result for result in heightlist if result > secondstart and result < secondend]\r\nthird = [result for result in heightlist if result > thirdstart and result < thirdend]\r\n\r\nfirstpercentage = len(first)* 100 / len(heightlist)\r\nsecondpercentage = len(second)* 100 / len(heightlist)\r\nthirdpercentage = len(third)* 100 / len(heightlist)\r\n\r\nprint(firstpercentage)\r\nprint(secondpercentage)\r\nprint(thirdpercentage)\r\n\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from PIL import Image
from src import urbandictionary_api
from src.card.cardDrawer import CardDrawer
from src.card.cardModel import CardModel
from src.repository import Repository
from src.urbandictionary_api import get_random_word
def save_card(word, image_path, filepath='data/cards/', filename=None):
    '''Generates and saves the card image.
    Returns filepath + filename.
    Parameters:
    word - the word whose content will appear on the card
    image - background image of the card
    filepath - directory in which to store the image
    filename - name of the image file
    '''
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(
content=content,
image=image,
auth_font=fonts.aut_font,
cat_font=fonts.cat_font,
def_font=fonts.def_font,
ex_font=fonts.ex_font,
rect_font=fonts.rect_font,
word_font=fonts.word_font,
thumb_font=fonts.thumb_font
)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
|
normal
|
{
"blob_id": "6bf1d410a33e3b2535e39e4f8c5c7f8278b3de67",
"index": 330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-4": "from PIL import Image\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-5": "from PIL import Image\n\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n '''Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n '''\n\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(\n content=content,\n image=image,\n auth_font=fonts.aut_font,\n cat_font=fonts.cat_font,\n def_font=fonts.def_font,\n ex_font=fonts.ex_font,\n rect_font=fonts.rect_font,\n word_font=fonts.word_font,\n thumb_font=fonts.thumb_font\n )\n\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n\n save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Bases(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def set_style(self):
"""set workshet's style, indent,border,font,and so on"""
@abstractmethod
def query(self):
"""query from mysql, sqlserver"""
@abstractmethod
def clean(self):
"""clean data"""
@abstractmethod
def export(self):
"""export data"""
class ReportForm(Bases, WorkSheet):
def __init__(self, visible=False, filename=None, sheetname=None):
WorkSheet.__init__(self, visible, filename, sheetname)
def __new__(cls, *args, **kwargs):
cls.query(cls)
cls.clean(cls)
cls.set_style(cls)
cls.export(cls)
return object.__new__(cls)
class DayRport(ReportForm):
def query(self):
print('query')
def set_style(self):
print('set_style')
def export(self):
print('export')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bases(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def set_style(self):
"""set workshet's style, indent,border,font,and so on"""
@abstractmethod
def query(self):
"""query from mysql, sqlserver"""
@abstractmethod
def clean(self):
"""clean data"""
@abstractmethod
def export(self):
"""export data"""
class ReportForm(Bases, WorkSheet):
def __init__(self, visible=False, filename=None, sheetname=None):
WorkSheet.__init__(self, visible, filename, sheetname)
def __new__(cls, *args, **kwargs):
cls.query(cls)
cls.clean(cls)
cls.set_style(cls)
cls.export(cls)
return object.__new__(cls)
class DayRport(ReportForm):
def query(self):
print('query')
def set_style(self):
print('set_style')
def export(self):
print('export')
if __name__ == '__main__':
d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')
time.sleep(5)
print(d)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Andy Yang'
class Bases(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def set_style(self):
"""set workshet's style, indent,border,font,and so on"""
@abstractmethod
def query(self):
"""query from mysql, sqlserver"""
@abstractmethod
def clean(self):
"""clean data"""
@abstractmethod
def export(self):
"""export data"""
class ReportForm(Bases, WorkSheet):
def __init__(self, visible=False, filename=None, sheetname=None):
WorkSheet.__init__(self, visible, filename, sheetname)
def __new__(cls, *args, **kwargs):
cls.query(cls)
cls.clean(cls)
cls.set_style(cls)
cls.export(cls)
return object.__new__(cls)
class DayRport(ReportForm):
def query(self):
print('query')
def set_style(self):
print('set_style')
def export(self):
print('export')
if __name__ == '__main__':
d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')
time.sleep(5)
print(d)
<|reserved_special_token_1|>
import time
from abc import ABCMeta, abstractmethod
from xlreportform.worksheet import WorkSheet
__author__ = 'Andy Yang'
class Bases(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def set_style(self):
"""set workshet's style, indent,border,font,and so on"""
@abstractmethod
def query(self):
"""query from mysql, sqlserver"""
@abstractmethod
def clean(self):
"""clean data"""
@abstractmethod
def export(self):
"""export data"""
class ReportForm(Bases, WorkSheet):
def __init__(self, visible=False, filename=None, sheetname=None):
WorkSheet.__init__(self, visible, filename, sheetname)
def __new__(cls, *args, **kwargs):
cls.query(cls)
cls.clean(cls)
cls.set_style(cls)
cls.export(cls)
return object.__new__(cls)
class DayRport(ReportForm):
def query(self):
print('query')
def set_style(self):
print('set_style')
def export(self):
print('export')
if __name__ == '__main__':
d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')
time.sleep(5)
print(d)
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
import time
from abc import ABCMeta, abstractmethod
from xlreportform.worksheet import WorkSheet
__author__ = "Andy Yang"
class Bases(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def set_style(self):
"""set workshet's style, indent,border,font,and so on"""
@abstractmethod
def query(self):
"""query from mysql, sqlserver"""
@abstractmethod
def clean(self):
"""clean data"""
@abstractmethod
def export(self):
"""export data"""
class ReportForm(Bases, WorkSheet):
def __init__(self, visible=False, filename=None, sheetname=None):
WorkSheet.__init__(self, visible, filename, sheetname)
def __new__(cls, *args, **kwargs):
cls.query(cls)
cls.clean(cls)
cls.set_style(cls)
cls.export(cls)
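        # Note: the whole query -> clean -> set_style -> export pipeline runs here
        # in __new__, i.e. on every instantiation of a subclass, before __init__.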
return object.__new__(cls)
class DayRport(ReportForm):
def query(self):
print('query')
def set_style(self):
print('set_style')
def export(self):
print('export')
if __name__ == '__main__':
d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')
time.sleep(5)
print(d)
|
flexible
|
{
"blob_id": "092c6d637fe85136b4184d05f0ac7db17a8efb3b",
"index": 6087,
"step-1": "<mask token>\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n",
"step-3": "<mask token>\n__author__ = 'Andy Yang'\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n",
"step-4": "import time\nfrom abc import ABCMeta, abstractmethod\nfrom xlreportform.worksheet import WorkSheet\n__author__ = 'Andy Yang'\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n",
"step-5": "# -*- coding:utf-8 -*-\r\nimport time\r\nfrom abc import ABCMeta, abstractmethod\r\nfrom xlreportform.worksheet import WorkSheet\r\n\r\n__author__ = \"Andy Yang\"\r\n\r\n\r\nclass Bases(metaclass=ABCMeta):\r\n def __init__(self):\r\n pass\r\n\r\n @abstractmethod\r\n def set_style(self):\r\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\r\n\r\n @abstractmethod\r\n def query(self):\r\n \"\"\"query from mysql, sqlserver\"\"\"\r\n\r\n @abstractmethod\r\n def clean(self):\r\n \"\"\"clean data\"\"\"\r\n\r\n @abstractmethod\r\n def export(self):\r\n \"\"\"export data\"\"\"\r\n\r\n\r\nclass ReportForm(Bases, WorkSheet):\r\n def __init__(self, visible=False, filename=None, sheetname=None):\r\n WorkSheet.__init__(self, visible, filename, sheetname)\r\n\r\n def __new__(cls, *args, **kwargs):\r\n cls.query(cls)\r\n cls.clean(cls)\r\n cls.set_style(cls)\r\n cls.export(cls)\r\n return object.__new__(cls)\r\n\r\n\r\nclass DayRport(ReportForm):\r\n def query(self):\r\n print('query')\r\n def set_style(self):\r\n print('set_style')\r\n def export(self):\r\n print('export')\r\n\r\n\r\nif __name__ == '__main__':\r\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\r\n time.sleep(5)\r\n print(d)",
"step-ids": [
13,
14,
15,
16,
17
]
}
|
[
13,
14,
15,
16,
17
] |
#!/usr/bin/env python
"""add_columns.py: This script reads an SCEC ETAS forecast directory name
and extracts key fields that are then added as attributes in the SCEC Deriva
schema.
This script is an example of how the ERD used by Deriva is extended as additional
information or metadata is added to the asset descriptions in Deriva.
This must be run after the create_model.py script has been run, because this modifies
the ERD created by that script.
The expectation is this is run once. If it is run a second time, we expect errors
indicating the columns already exist.
Philip Maechling
3 April 2021
"""
import os
import sys
from deriva.core import DerivaServer, ErmrestCatalog, get_credential
from deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag
if __name__ == "__main__":
# Connect to server and catalog ------------------------------------------------------------------#
    hostname = 'forecast.derivacloud.org' # this is a dev server for throw-away work (change to 'forecast.derivacloud.org')
catalog_id = '5' # this was a throw-away catalog used to test this script (change to TBD)
model = Model.from_catalog(
DerivaServer('https', hostname, credentials=get_credential(hostname)).connect_ermrest(catalog_id)
)
#
    # During testing, a sys.exit() can be inserted here to bail out before any table modifications are done
#
tabname = model.schemas['ETAS'].tables["Forecast"]
print("Before Adding Column")
for column in tabname.column_definitions:
print(column.name,column.type.typename,column.nullok)
"""
Define a series of column names that reflect metadata we expect to extract from
the ETAS directory names. These are initial names, defined by developers.
ETAS modelers may want to rename these columns to be more meaningful to domain experts.
For this first version, all fields are defined as free text.
Redefinition of these values as controlled vocabularies are a future refinement.
1) Sim_Start_Time: Enumeration List
e.g: "2019_07_16"
not null
2) Catalog_Mag: Enumeration List
e.g.: "ComCatM7p1"
not null
3) Event_ID: Enumeration List
e.g.: "ci39457511"
not null
4) Post_Event_Date: Enumeration List
e.g.: "7DaysAfter"
maybe null
5) Rupture_Def: Enumeration List
e.g. "ShakeMapSurfaces"
"ShakeMapSurfaces-noSpont-full_td-scale1.14"
not null
"""
tabname.create_column(Column.define('Sim_Start_Time',
builtin_types.text,
comment="Simulation Start Time"))
tabname.create_column(Column.define('Catalog_Mag',
builtin_types.text,
comment="Catalog Name and Event Magnitude"))
tabname.create_column(Column.define('Event_ID',
builtin_types.text,
comment="Earthquake Event ID"))
tabname.create_column(Column.define('Post_Event_Date',
builtin_types.text,
comment="Days Forecast made after Mainshock"))
tabname.create_column(Column.define('Rupture_Definition',
builtin_types.text,
comment="Type of Rupture used in ETAS forecast"))
# retrieve catalog model again to ensure we reflect latest structural changes
# example shows this, but I'm not sure what it returns
print("After Adding Column")
etas_model = model.schemas['ETAS']
tabname = etas_model.tables["Forecast"]
for column in tabname.column_definitions:
print(column.name,column.type.typename,column.nullok)
sys.exit(0)
|
normal
|
{
"blob_id": "a745f72081e06ff3399f9d7f65a30d7eef594689",
"index": 2292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n hostname = 'forecast.derivacloud.org'\n catalog_id = '5'\n model = Model.from_catalog(DerivaServer('https', hostname, credentials=\n get_credential(hostname)).connect_ermrest(catalog_id))\n tabname = model.schemas['ETAS'].tables['Forecast']\n print('Before Adding Column')\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n tabname.create_column(Column.define('Sim_Start_Time', builtin_types.\n text, comment='Simulation Start Time'))\n tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,\n comment='Catalog Name and Event Magnitude'))\n tabname.create_column(Column.define('Event_ID', builtin_types.text,\n comment='Earthquake Event ID'))\n tabname.create_column(Column.define('Post_Event_Date', builtin_types.\n text, comment='Days Forecast made after Mainshock'))\n tabname.create_column(Column.define('Rupture_Definition', builtin_types\n .text, comment='Type of Rupture used in ETAS forecast'))\n print('After Adding Column')\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables['Forecast']\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n sys.exit(0)\n",
"step-3": "<mask token>\nimport os\nimport sys\nfrom deriva.core import DerivaServer, ErmrestCatalog, get_credential\nfrom deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag\nif __name__ == '__main__':\n hostname = 'forecast.derivacloud.org'\n catalog_id = '5'\n model = Model.from_catalog(DerivaServer('https', hostname, credentials=\n get_credential(hostname)).connect_ermrest(catalog_id))\n tabname = model.schemas['ETAS'].tables['Forecast']\n print('Before Adding Column')\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n tabname.create_column(Column.define('Sim_Start_Time', builtin_types.\n text, comment='Simulation Start Time'))\n tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,\n comment='Catalog Name and Event Magnitude'))\n tabname.create_column(Column.define('Event_ID', builtin_types.text,\n comment='Earthquake Event ID'))\n tabname.create_column(Column.define('Post_Event_Date', builtin_types.\n text, comment='Days Forecast made after Mainshock'))\n tabname.create_column(Column.define('Rupture_Definition', builtin_types\n .text, comment='Type of Rupture used in ETAS forecast'))\n print('After Adding Column')\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables['Forecast']\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n sys.exit(0)\n",
"step-4": "#!/usr/bin/env python\n\n\n\"\"\"add_columns.py: This script reads an SCEC ETAS forecast directory name\nand extracts key fields that are then added as attributes in the SCEC Deriva\nschema.\n\n This script is an example of how the ERD used by Deriva is extended as additional\n information or metadata is added to the asset descriptions in Deriva.\n\n This must be run after the create_model.py script has been run, because this modifies\n the ERD created by that script.\n \n The expectation is this is run once. If it is run a second time, we expect errors\n indicating the columns already exist.\n \nPhilip Maechling\n3 April 2021\n\"\"\"\nimport os\nimport sys\nfrom deriva.core import DerivaServer, ErmrestCatalog, get_credential\nfrom deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag\n\nif __name__ == \"__main__\":\n\n # Connect to server and catalog ------------------------------------------------------------------#\n\n hostname = 'forecast.derivacloud.org' # this is a dev server for throw-away work (change to 'forecast.derivacloud.org)\n catalog_id = '5' # this was a throw-away catalog used to test this script (change to TBD)\n\n model = Model.from_catalog(\n DerivaServer('https', hostname, credentials=get_credential(hostname)).connect_ermrest(catalog_id)\n )\n\n #\n # During testing, exit before any table modifications are done\n #\n\n\n tabname = model.schemas['ETAS'].tables[\"Forecast\"]\n print(\"Before Adding Column\")\n for column in tabname.column_definitions:\n print(column.name,column.type.typename,column.nullok)\n\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n\n\n tabname.create_column(Column.define('Sim_Start_Time',\n builtin_types.text,\n comment=\"Simulation Start Time\"))\n\n tabname.create_column(Column.define('Catalog_Mag',\n builtin_types.text,\n comment=\"Catalog Name and Event Magnitude\"))\n\n tabname.create_column(Column.define('Event_ID',\n builtin_types.text,\n comment=\"Earthquake Event ID\"))\n\n tabname.create_column(Column.define('Post_Event_Date',\n builtin_types.text,\n comment=\"Days Forecast made after Mainshock\"))\n\n tabname.create_column(Column.define('Rupture_Definition',\n builtin_types.text,\n comment=\"Type of Rupture used in ETAS forecast\"))\n\n # retrieve catalog model again to ensure we reflect latest structural changes\n # example shows this, but I'm not sure what it returns\n print(\"After Adding Column\")\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables[\"Forecast\"]\n for column in tabname.column_definitions:\n print(column.name,column.type.typename,column.nullok)\n\n sys.exit(0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
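The five Forecast columns above are intended to be populated from ETAS directory names. As a rough illustration only, a parser for a hypothetical dash-delimited name could look like the sketch below; the delimiter and field order are assumptions, not the actual SCEC naming convention.

def parse_etas_dirname(dirname):
    # Hypothetical layout: start-catalog-event-postdate-rupture.
    keys = ['Sim_Start_Time', 'Catalog_Mag', 'Event_ID',
            'Post_Event_Date', 'Rupture_Definition']
    # maxsplit keeps dashes inside Rupture_Definition values intact,
    # e.g. "ShakeMapSurfaces-noSpont-full_td-scale1.14".
    parts = dirname.split('-', len(keys) - 1)
    parts += [None] * (len(keys) - len(parts))  # Post_Event_Date "maybe null"
    return dict(zip(keys, parts))

print(parse_etas_dirname(
    '2019_07_16-ComCatM7p1-ci39457511-7DaysAfter-ShakeMapSurfaces'))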
# -*- coding: utf-8 -*-
"""Very basic codec tests.
:copyright: the translitcodec authors and developers, see AUTHORS.
:license: MIT, see LICENSE for more details.
"""
import codecs
import translitcodec
data = u'£ ☹ wøóf méåw'
def test_default():
assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'
def test_translit_long():
assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'
def test_translit_short():
assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'
def test_translit_one():
assert codecs.encode(data, 'translit/one') == u'\u00a3 \u2639 woof meaw'
def test_translit_long_ascii():
    assert data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'
def test_translit_short_ascii():
    assert data.encode('translit/short/ascii') == b'GBP :-( woof meaw'
def test_translit_one_ascii():
try:
codecs.encode(data, 'translit/one/ascii')
assert False
except UnicodeEncodeError:
assert True
assert codecs.encode(data, 'translit/one/ascii', 'replace') == b'? ? woof meaw'
def test_ascii_level_characters_remain():
assert codecs.encode(u"'", 'translit/long') == u"'"
def test_zero_width_space():
try:
char = codecs.encode(u'\u200b', 'translit/long')
assert char == u''
except TypeError:
assert False
|
normal
|
{
"blob_id": "426002bf900e23fd9b1d32c484350ac854228459",
"index": 2565,
"step-1": "<mask token>\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\n<mask token>\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? woof meaw'\n\n\n<mask token>\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n",
"step-2": "<mask token>\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'£ ☹ woof meaw'\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? woof meaw'\n\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n",
"step-3": "<mask token>\n\n\ndef test_default():\n assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'£ ☹ woof meaw'\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? woof meaw'\n\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n",
"step-4": "<mask token>\ndata = u'£ ☹ wøóf méåw'\n\n\ndef test_default():\n assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'£ ☹ woof meaw'\n\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n assert codecs.encode(data, 'translit/one/ascii', 'replace'\n ) == b'? ? woof meaw'\n\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Very basic codec tests.\n\n:copyright: the translitcodec authors and developers, see AUTHORS.\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\nimport codecs\nimport translitcodec\n\n\ndata = u'£ ☹ wøóf méåw'\n\ndef test_default():\n assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'\n\ndef test_translit_long():\n assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'\n\ndef test_translit_short():\n assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'\n\ndef test_translit_one():\n assert codecs.encode(data, 'translit/one') == u'\\u00a3 \\u2639 woof meaw'\n\ndef test_translit_long_ascii():\n data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'\n\ndef test_translit_short_ascii():\n data.encode('translit/short/ascii') == b'GBP :-( woof meaw'\n\ndef test_translit_one_ascii():\n try:\n codecs.encode(data, 'translit/one/ascii')\n assert False\n except UnicodeEncodeError:\n assert True\n\n assert codecs.encode(data, 'translit/one/ascii', 'replace') == b'? ? woof meaw'\n\ndef test_ascii_level_characters_remain():\n assert codecs.encode(u\"'\", 'translit/long') == u\"'\"\n\ndef test_zero_width_space():\n try:\n char = codecs.encode(u'\\u200b', 'translit/long')\n assert char == u''\n except TypeError:\n assert False\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
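Beyond the assertions above, a common practical use of these codecs is generating ASCII slugs. A minimal sketch; the slugify helper is illustrative, only the 'translit/long' codec comes from translitcodec:

import codecs
import re

import translitcodec  # noqa: F401 -- importing registers the codecs

def slugify(text):
    # Transliterate to ASCII-level text, then collapse everything else.
    ascii_text = codecs.encode(text, 'translit/long')
    return re.sub(r'[^a-z0-9]+', '-', ascii_text.lower()).strip('-')

print(slugify(u'£ ☹ wøóf méåw'))  # -> 'gbp-woof-meaaw'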
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HoursForm(FlaskForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
csrf = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HoursForm(FlaskForm):
date = StringField('Date')
begins = DecimalField('Begins')
ends = DecimalField('Ends')
class Meta:
csrf = False
<|reserved_special_token_1|>
from flask_wtf import FlaskForm
from wtforms import StringField, DateField, DecimalField
class HoursForm(FlaskForm):
date = StringField('Date')
begins = DecimalField('Begins')
ends = DecimalField('Ends')
class Meta:
csrf = False
<|reserved_special_token_1|>
from flask_wtf import FlaskForm
from wtforms import StringField, DateField, DecimalField
class HoursForm(FlaskForm):
date = StringField("Date")
begins = DecimalField("Begins")
ends = DecimalField("Ends")
class Meta:
csrf = False
|
flexible
|
{
"blob_id": "b1a808e76008edec02d37ec596461e3a00a1d349",
"index": 4553,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass HoursForm(FlaskForm):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n csrf = False\n",
"step-3": "<mask token>\n\n\nclass HoursForm(FlaskForm):\n date = StringField('Date')\n begins = DecimalField('Begins')\n ends = DecimalField('Ends')\n\n\n class Meta:\n csrf = False\n",
"step-4": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, DateField, DecimalField\n\n\nclass HoursForm(FlaskForm):\n date = StringField('Date')\n begins = DecimalField('Begins')\n ends = DecimalField('Ends')\n\n\n class Meta:\n csrf = False\n",
"step-5": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, DateField, DecimalField\n\nclass HoursForm(FlaskForm):\n date = StringField(\"Date\")\n begins = DecimalField(\"Begins\")\n ends = DecimalField(\"Ends\")\n \n class Meta:\n csrf = False\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
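Because Meta.csrf is disabled, the form above can be validated with explicit form data and no request context, which is handy in unit tests. A minimal sketch, assuming HoursForm is importable and the field values are made up:

from werkzeug.datastructures import MultiDict

form = HoursForm(formdata=MultiDict(
    {'date': '2021-04-03', 'begins': '9.0', 'ends': '17.5'}))
print(form.validate())                   # True when both decimals parse
print(form.begins.data, form.ends.data)  # Decimal('9.0') Decimal('17.5')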
import os
from redis import Redis
try:
if os.environ.get('DEBUG'):
import settings_local as settings
else:
import settings_prod as settings
except ImportError:
import settings
redis_env = os.environ.get('REDISTOGO_URL')
if redis_env:
redis = Redis.from_url(redis_env)
elif getattr(settings, 'REDIS_URL', None):
redis = Redis.from_url(settings.REDIS_URL)
else:
redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=
settings.REDIS_DB, password=settings.REDIS_PASS)
|
normal
|
{
"blob_id": "4c3a27bf1f7e617f4b85dc2b59efa184751b69ac",
"index": 3868,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n if os.environ.get('DEBUG'):\n import settings_local as settings\n else:\n import settings_prod as settings\nexcept ImportError:\n import settings\n<mask token>\nif redis_env:\n redis = Redis.from_url(redis_env)\nelif getattr(settings, 'REDIS_URL', None):\n redis = Redis.from_url(settings.REDIS_URL)\nelse:\n redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=\n settings.REDIS_DB, password=settings.REDIS_PASS)\n",
"step-3": "<mask token>\ntry:\n if os.environ.get('DEBUG'):\n import settings_local as settings\n else:\n import settings_prod as settings\nexcept ImportError:\n import settings\nredis_env = os.environ.get('REDISTOGO_URL')\nif redis_env:\n redis = Redis.from_url(redis_env)\nelif getattr(settings, 'REDIS_URL', None):\n redis = Redis.from_url(settings.REDIS_URL)\nelse:\n redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=\n settings.REDIS_DB, password=settings.REDIS_PASS)\n",
"step-4": "import os\nfrom redis import Redis\ntry:\n if os.environ.get('DEBUG'):\n import settings_local as settings\n else:\n import settings_prod as settings\nexcept ImportError:\n import settings\nredis_env = os.environ.get('REDISTOGO_URL')\nif redis_env:\n redis = Redis.from_url(redis_env)\nelif getattr(settings, 'REDIS_URL', None):\n redis = Redis.from_url(settings.REDIS_URL)\nelse:\n redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=\n settings.REDIS_DB, password=settings.REDIS_PASS)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
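Whichever branch configured it, the resulting client is used the same way. A quick smoke test, assuming a reachable Redis server:

redis.set('healthcheck', 'ok', ex=60)    # store a value with a 60 s TTL
assert redis.get('healthcheck') == b'ok'
print(redis.ttl('healthcheck'))          # seconds remaining until expiry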
<|reserved_special_token_0|>
class Utilizator(AbstractUser):
""" Tabel info utilizator
nume - extras automat din email ([nume]@gmail.com)
email - se va loga cu emailul
parola - ***
descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament
ocupatie - tipul jobului
sex - mf
varsta -
buget -
imagine_profil - imagine profil
cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile
cont_premium: regular: cont gratis poate avea activ doar un anunt,
premium: cont platit poate avea activ unul sau mai multe anunturi,
poate vedea statistici cu privire la anunturile postate
primeste prin email atunci cand un anunt a fost postat
Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite
"""
email = models.EmailField(unique=True)
descriere = models.CharField(max_length=255, blank=True)
ocupatie = models.CharField(max_length=50, blank=True, default=
'nespecificat')
nume = models.CharField(max_length=50, blank=True)
sex = models.CharField(max_length=1, blank=True, default='N')
varsta = models.PositiveIntegerField(blank=True, null=True)
buget = models.PositiveIntegerField(blank=False, null=True)
telefon = models.CharField(max_length=20, blank=True, default=
'nespecificat')
imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',
default='utilizatori/imagine_profil.svg')
cont_premium = models.BooleanField(default=False)
token = models.CharField(max_length=1, blank=True)
first_name = None
last_name = None
username = None
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return f'{self.email}'
class Meta:
verbose_name_plural = 'Utilizatori'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserManager(BaseUserManager):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Utilizator(AbstractUser):
""" Tabel info utilizator
nume - extras automat din email ([nume]@gmail.com)
email - se va loga cu emailul
parola - ***
descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament
ocupatie - tipul jobului
sex - mf
varsta -
buget -
imagine_profil - imagine profil
cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile
cont_premium: regular: cont gratis poate avea activ doar un anunt,
premium: cont platit poate avea activ unul sau mai multe anunturi,
poate vedea statistici cu privire la anunturile postate
primeste prin email atunci cand un anunt a fost postat
Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite
"""
email = models.EmailField(unique=True)
descriere = models.CharField(max_length=255, blank=True)
ocupatie = models.CharField(max_length=50, blank=True, default=
'nespecificat')
nume = models.CharField(max_length=50, blank=True)
sex = models.CharField(max_length=1, blank=True, default='N')
varsta = models.PositiveIntegerField(blank=True, null=True)
buget = models.PositiveIntegerField(blank=False, null=True)
telefon = models.CharField(max_length=20, blank=True, default=
'nespecificat')
imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',
default='utilizatori/imagine_profil.svg')
cont_premium = models.BooleanField(default=False)
token = models.CharField(max_length=1, blank=True)
first_name = None
last_name = None
username = None
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return f'{self.email}'
class Meta:
verbose_name_plural = 'Utilizatori'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserManager(BaseUserManager):
def create_user(self, email, password, **kwargs):
user = self.model(email=email, **kwargs)
user.set_password(password)
user.save()
return user
<|reserved_special_token_0|>
class Utilizator(AbstractUser):
""" Tabel info utilizator
nume - extras automat din email ([nume]@gmail.com)
email - se va loga cu emailul
parola - ***
descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament
ocupatie - tipul jobului
sex - mf
varsta -
buget -
imagine_profil - imagine profil
cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile
cont_premium: regular: cont gratis poate avea activ doar un anunt,
premium: cont platit poate avea activ unul sau mai multe anunturi,
poate vedea statistici cu privire la anunturile postate
primeste prin email atunci cand un anunt a fost postat
Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite
"""
email = models.EmailField(unique=True)
descriere = models.CharField(max_length=255, blank=True)
ocupatie = models.CharField(max_length=50, blank=True, default=
'nespecificat')
nume = models.CharField(max_length=50, blank=True)
sex = models.CharField(max_length=1, blank=True, default='N')
varsta = models.PositiveIntegerField(blank=True, null=True)
buget = models.PositiveIntegerField(blank=False, null=True)
telefon = models.CharField(max_length=20, blank=True, default=
'nespecificat')
imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',
default='utilizatori/imagine_profil.svg')
cont_premium = models.BooleanField(default=False)
token = models.CharField(max_length=1, blank=True)
first_name = None
last_name = None
username = None
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return f'{self.email}'
class Meta:
verbose_name_plural = 'Utilizatori'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserManager(BaseUserManager):
def create_user(self, email, password, **kwargs):
user = self.model(email=email, **kwargs)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **kwargs):
user = self.model(email=email, is_staff=True, is_superuser=True, **
kwargs)
user.set_password(password)
user.save()
return user
class Utilizator(AbstractUser):
""" Tabel info utilizator
nume - extras automat din email ([nume]@gmail.com)
email - se va loga cu emailul
parola - ***
descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament
ocupatie - tipul jobului
sex - mf
varsta -
buget -
imagine_profil - imagine profil
cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile
cont_premium: regular: cont gratis poate avea activ doar un anunt,
premium: cont platit poate avea activ unul sau mai multe anunturi,
poate vedea statistici cu privire la anunturile postate
primeste prin email atunci cand un anunt a fost postat
Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite
"""
email = models.EmailField(unique=True)
descriere = models.CharField(max_length=255, blank=True)
ocupatie = models.CharField(max_length=50, blank=True, default=
'nespecificat')
nume = models.CharField(max_length=50, blank=True)
sex = models.CharField(max_length=1, blank=True, default='N')
varsta = models.PositiveIntegerField(blank=True, null=True)
buget = models.PositiveIntegerField(blank=False, null=True)
telefon = models.CharField(max_length=20, blank=True, default=
'nespecificat')
imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',
default='utilizatori/imagine_profil.svg')
cont_premium = models.BooleanField(default=False)
token = models.CharField(max_length=1, blank=True)
first_name = None
last_name = None
username = None
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return f'{self.email}'
class Meta:
verbose_name_plural = 'Utilizatori'
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
class UserManager(BaseUserManager):
    # Needed so that username is no longer a required field
def create_user(self, email, password, **kwargs):
user = self.model(email=email, **kwargs)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **kwargs):
user = self.model(email=email, is_staff=True, is_superuser=True, **kwargs)
user.set_password(password)
user.save()
return user
class Utilizator(AbstractUser):
""" Tabel info utilizator
nume - extras automat din email ([nume]@gmail.com)
email - se va loga cu emailul
parola - ***
descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament
ocupatie - tipul jobului
sex - mf
varsta -
buget -
imagine_profil - imagine profil
cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile
cont_premium: regular: cont gratis poate avea activ doar un anunt,
premium: cont platit poate avea activ unul sau mai multe anunturi,
poate vedea statistici cu privire la anunturile postate
primeste prin email atunci cand un anunt a fost postat
Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite
"""
email = models.EmailField(unique=True)
descriere = models.CharField(max_length=255, blank=True)
ocupatie = models.CharField(max_length=50, blank=True, default="nespecificat")
nume = models.CharField(max_length=50, blank=True)
sex = models.CharField(max_length=1, blank=True, default="N")
varsta = models.PositiveIntegerField(blank=True, null=True)
buget = models.PositiveIntegerField(blank=False, null=True)
telefon = models.CharField(max_length=20, blank=True, default="nespecificat")
imagine_profil = models.ImageField(blank=True, upload_to="utilizatori/", default="utilizatori/imagine_profil.svg")
cont_premium = models.BooleanField(default=False)
token = models.CharField(max_length=1, blank=True)
    # Remove these fields/columns
first_name = None
last_name = None
    # Needed to replace username with email
username = None
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return f"{self.email}"
class Meta:
verbose_name_plural = "Utilizatori"
|
flexible
|
{
"blob_id": "85b8ffe1bca879acd86251e4662b33648b713588",
"index": 7243,
"step-1": "<mask token>\n\n\nclass Utilizator(AbstractUser):\n \"\"\" Tabel info utilizator \n nume - extras automat din email ([nume]@gmail.com)\n email - se va loga cu emailul\n parola - *** \n descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament\n ocupatie - tipul jobului\n sex - mf\n varsta - \n buget - \n imagine_profil - imagine profil\n cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile\n cont_premium: regular: cont gratis poate avea activ doar un anunt, \n premium: cont platit poate avea activ unul sau mai multe anunturi, \n poate vedea statistici cu privire la anunturile postate\n primeste prin email atunci cand un anunt a fost postat\n Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite\n \"\"\"\n email = models.EmailField(unique=True)\n descriere = models.CharField(max_length=255, blank=True)\n ocupatie = models.CharField(max_length=50, blank=True, default=\n 'nespecificat')\n nume = models.CharField(max_length=50, blank=True)\n sex = models.CharField(max_length=1, blank=True, default='N')\n varsta = models.PositiveIntegerField(blank=True, null=True)\n buget = models.PositiveIntegerField(blank=False, null=True)\n telefon = models.CharField(max_length=20, blank=True, default=\n 'nespecificat')\n imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',\n default='utilizatori/imagine_profil.svg')\n cont_premium = models.BooleanField(default=False)\n token = models.CharField(max_length=1, blank=True)\n first_name = None\n last_name = None\n username = None\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n objects = UserManager()\n\n def __str__(self):\n return f'{self.email}'\n\n\n class Meta:\n verbose_name_plural = 'Utilizatori'\n",
"step-2": "<mask token>\n\n\nclass UserManager(BaseUserManager):\n <mask token>\n <mask token>\n\n\nclass Utilizator(AbstractUser):\n \"\"\" Tabel info utilizator \n nume - extras automat din email ([nume]@gmail.com)\n email - se va loga cu emailul\n parola - *** \n descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament\n ocupatie - tipul jobului\n sex - mf\n varsta - \n buget - \n imagine_profil - imagine profil\n cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile\n cont_premium: regular: cont gratis poate avea activ doar un anunt, \n premium: cont platit poate avea activ unul sau mai multe anunturi, \n poate vedea statistici cu privire la anunturile postate\n primeste prin email atunci cand un anunt a fost postat\n Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite\n \"\"\"\n email = models.EmailField(unique=True)\n descriere = models.CharField(max_length=255, blank=True)\n ocupatie = models.CharField(max_length=50, blank=True, default=\n 'nespecificat')\n nume = models.CharField(max_length=50, blank=True)\n sex = models.CharField(max_length=1, blank=True, default='N')\n varsta = models.PositiveIntegerField(blank=True, null=True)\n buget = models.PositiveIntegerField(blank=False, null=True)\n telefon = models.CharField(max_length=20, blank=True, default=\n 'nespecificat')\n imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',\n default='utilizatori/imagine_profil.svg')\n cont_premium = models.BooleanField(default=False)\n token = models.CharField(max_length=1, blank=True)\n first_name = None\n last_name = None\n username = None\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n objects = UserManager()\n\n def __str__(self):\n return f'{self.email}'\n\n\n class Meta:\n verbose_name_plural = 'Utilizatori'\n",
"step-3": "<mask token>\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, email, password, **kwargs):\n user = self.model(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user\n <mask token>\n\n\nclass Utilizator(AbstractUser):\n \"\"\" Tabel info utilizator \n nume - extras automat din email ([nume]@gmail.com)\n email - se va loga cu emailul\n parola - *** \n descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament\n ocupatie - tipul jobului\n sex - mf\n varsta - \n buget - \n imagine_profil - imagine profil\n cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile\n cont_premium: regular: cont gratis poate avea activ doar un anunt, \n premium: cont platit poate avea activ unul sau mai multe anunturi, \n poate vedea statistici cu privire la anunturile postate\n primeste prin email atunci cand un anunt a fost postat\n Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite\n \"\"\"\n email = models.EmailField(unique=True)\n descriere = models.CharField(max_length=255, blank=True)\n ocupatie = models.CharField(max_length=50, blank=True, default=\n 'nespecificat')\n nume = models.CharField(max_length=50, blank=True)\n sex = models.CharField(max_length=1, blank=True, default='N')\n varsta = models.PositiveIntegerField(blank=True, null=True)\n buget = models.PositiveIntegerField(blank=False, null=True)\n telefon = models.CharField(max_length=20, blank=True, default=\n 'nespecificat')\n imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',\n default='utilizatori/imagine_profil.svg')\n cont_premium = models.BooleanField(default=False)\n token = models.CharField(max_length=1, blank=True)\n first_name = None\n last_name = None\n username = None\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n objects = UserManager()\n\n def __str__(self):\n return f'{self.email}'\n\n\n class Meta:\n verbose_name_plural = 'Utilizatori'\n",
"step-4": "<mask token>\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, email, password, **kwargs):\n user = self.model(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, email, password, **kwargs):\n user = self.model(email=email, is_staff=True, is_superuser=True, **\n kwargs)\n user.set_password(password)\n user.save()\n return user\n\n\nclass Utilizator(AbstractUser):\n \"\"\" Tabel info utilizator \n nume - extras automat din email ([nume]@gmail.com)\n email - se va loga cu emailul\n parola - *** \n descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament\n ocupatie - tipul jobului\n sex - mf\n varsta - \n buget - \n imagine_profil - imagine profil\n cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile\n cont_premium: regular: cont gratis poate avea activ doar un anunt, \n premium: cont platit poate avea activ unul sau mai multe anunturi, \n poate vedea statistici cu privire la anunturile postate\n primeste prin email atunci cand un anunt a fost postat\n Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite\n \"\"\"\n email = models.EmailField(unique=True)\n descriere = models.CharField(max_length=255, blank=True)\n ocupatie = models.CharField(max_length=50, blank=True, default=\n 'nespecificat')\n nume = models.CharField(max_length=50, blank=True)\n sex = models.CharField(max_length=1, blank=True, default='N')\n varsta = models.PositiveIntegerField(blank=True, null=True)\n buget = models.PositiveIntegerField(blank=False, null=True)\n telefon = models.CharField(max_length=20, blank=True, default=\n 'nespecificat')\n imagine_profil = models.ImageField(blank=True, upload_to='utilizatori/',\n default='utilizatori/imagine_profil.svg')\n cont_premium = models.BooleanField(default=False)\n token = models.CharField(max_length=1, blank=True)\n first_name = None\n last_name = None\n username = None\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n objects = UserManager()\n\n def __str__(self):\n return f'{self.email}'\n\n\n class Meta:\n verbose_name_plural = 'Utilizatori'\n",
"step-5": "from django.db import models\n\nfrom django.contrib.auth.models import AbstractUser, BaseUserManager\n\n\nclass UserManager(BaseUserManager):\n #Necesar pentru a scoate username de la required\n\n def create_user(self, email, password, **kwargs):\n user = self.model(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, email, password, **kwargs):\n user = self.model(email=email, is_staff=True, is_superuser=True, **kwargs)\n user.set_password(password)\n user.save()\n return user\n\n\nclass Utilizator(AbstractUser):\n \"\"\" Tabel info utilizator \n nume - extras automat din email ([nume]@gmail.com)\n email - se va loga cu emailul\n parola - *** \n descriere - informatiile despre utilizator scrise de acesta pentru celilati potential colegi de apartament\n ocupatie - tipul jobului\n sex - mf\n varsta - \n buget - \n imagine_profil - imagine profil\n cont_admin - are access la backend, administratorul poate gestiona utilizatorii si anunturile\n cont_premium: regular: cont gratis poate avea activ doar un anunt, \n premium: cont platit poate avea activ unul sau mai multe anunturi, \n poate vedea statistici cu privire la anunturile postate\n primeste prin email atunci cand un anunt a fost postat\n Un utilizator poate avea unul sau mai multe anunturi postate si/sau unul sau mai multe anunturi salvate la favorite\n \"\"\"\n email = models.EmailField(unique=True)\n descriere = models.CharField(max_length=255, blank=True)\n ocupatie = models.CharField(max_length=50, blank=True, default=\"nespecificat\")\n nume = models.CharField(max_length=50, blank=True)\n sex = models.CharField(max_length=1, blank=True, default=\"N\")\n varsta = models.PositiveIntegerField(blank=True, null=True)\n buget = models.PositiveIntegerField(blank=False, null=True)\n telefon = models.CharField(max_length=20, blank=True, default=\"nespecificat\")\n imagine_profil = models.ImageField(blank=True, upload_to=\"utilizatori/\", default=\"utilizatori/imagine_profil.svg\")\n cont_premium = models.BooleanField(default=False)\n \n token = models.CharField(max_length=1, blank=True)\n \n #Scoatem field/coloanele \n first_name = None\n last_name = None\n\n #Necesare pentru a inlocui username cu email\n username = None\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n objects = UserManager()\n \n def __str__(self):\n return f\"{self.email}\"\n\n class Meta:\n verbose_name_plural = \"Utilizatori\"\n\n ",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
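A hypothetical shell session showing the email-based manager from the record above in action; run it inside a configured Django project with migrations applied, and note that all field values here are made up:

user = Utilizator.objects.create_user(
    email='ana@example.com', password='parola123',
    sex='F', varsta=24, buget=1500)
print(user)  # 'ana@example.com' via __str__
# The docstring says `nume` is derived from the email, but the model does
# not do that automatically, so a caller would have to set it explicitly:
user.nume = user.email.split('@')[0]
user.save()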
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/11 16:07
# @Author : LiuZhi
# @Site :
# @File : Function.py
# @Software: PyCharm
#The built-in absolute-value function
print(abs(100))
print(abs(-20))
print(abs(12.34))
#print(abs(1,2))
#print(abs('q'))
print(max(1,2))
print(max(1,2,3,-5))
print(int('123'))
print(int(12.34))
print(float('12.34'))
print(str(1.23))
print(str(100))
print(bool(1))
print(bool(''))
a = abs
print(a(-1))
n1 = 255
n2 = 1000
print(hex(255))
print(hex(1000))
from abstest import my_abs
print(my_abs(-2))
#print(my_abs(-2,3))
#print(my_abs('222'))
'''
How to use pass
def nop():
pass
age = 26
if age >= 18:
pass
'''
import math
def move(x, y, step, angle=0):
    nx = x + step * math.cos(angle)
    ny = y - step * math.sin(angle)
    return nx, ny
x, y = move(100, 100, 60, math.pi/6)
print(x,y)
r = move(100, 100, 60, math.pi/6)
print(r)
#Find the roots of a quadratic equation
def quadratic(a, b, c):
if not isinstance(a, (int, float)):
raise TypeError('a is not a number')
if not isinstance(b, (int, float)):
raise TypeError('a is not a number')
if not isinstance(c, (int, float)):
raise TypeError('a is not a number')
d = b*b - 4*a*c
if a == 0:
if b == 0:
if c == 0:
                return 'all real numbers are roots'
else:
                return 'the equation has no roots'
else:
x1 = -c/b
x2 = x1
return x1, x2
else:
        if d < 0:
            return 'the equation has no roots'
        else:
            x1 = (-b + math.sqrt(d)) / 2 / a
            x2 = (-b - math.sqrt(d)) / 2 / a
            return x1, x2
print(quadratic(2, 3, 1))
print(quadratic(0,0,0))
def power(x):
return x*x
print(power(4))
print(power(-2))
#Default parameters
def powerThree(x, n=2):
s = 1
    while n > 0:
n = n - 1
s = s * x
return s
print(powerThree(5,3))
print(powerThree(5))
def enroll(name, gender, age=6, city='Beijing'):
print('name:', name)
print('gender', gender)
print('age', age)
print('city', city)
print(enroll('Sarah', 'F'))
def add_end(l=None):
if l is None:
l = []
l.append('END')
return l
print(add_end([1, 2, 3]))
print(add_end())
print(add_end())
print(add_end())
#Variable-length arguments
def calc(numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc([1, 2, 3]))
print(calc((1, 2, 3)))
def calcTwo(*numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calcTwo(1,2))
print(calcTwo())
numbers = [1, 2, 3]
print(calcTwo(numbers[0], numbers[1], numbers[2]))
print(calcTwo(*numbers))
#Keyword arguments
def person(name, age, **kw):
print('name:', name, 'age:', age, 'other:', kw)
person('michael', 30)
person('michael', 30, city='Beijing')
person('michael', 30, gender='m', job='engineer')
extra = {'city':'Beijing', 'job': 'engineer'}
person('Jack', 24, city = extra['city'], job = extra['job'])
person('Jack', 24, **extra)
def person(name, age, **kw):
if 'city' in kw:
pass
if 'job' in kw:
pass
print('name:', name, 'age:', age, 'other:', kw)
person('jack', 24, city='beijing', addr = 'chaoyang', zipcode=123456)
def personTwo(name, age, *, city, job):
print(name, age, city, job)
personTwo('jack', 24, city='beijing', job='engineer')
def personThree(name, age, *args, city, job):
print(name, age, args, city, job)
#personThree('jack', 24, 'beijing', 'engineer')
def personFour(name, age, *, city='beijing', job):
print(name, age, city, job)
personFour('jack', 24, job = 'engineer')
def personFive(name, age, city, job):
pass
def f1(a, b, c=0, *args, **kw):
print('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)
def f2(a, b, c=0, *, d, **kw):
print('a=', a, 'b=', b, 'c=', c, 'd=', d, 'kw=', kw)
f1(1, 2)
f1(1, 2, 3)
f1(1, 2, 3, 'a', 'b')
f1(1, 2, 3, 'a', 'b', x=99)
f2(1, 2, d=99, ext=None)
args = (1,2, 3, 4)
kw = {'d':99,'x':'#'}
f1(*args, **kw)
args = (1,2, 3)
kw = {'d':88, 'x':'#'}
f2(*args, **kw)
|
normal
|
{
"blob_id": "8a6eb2eb746e3b9de92998b70ddff2a39cb1f269",
"index": 6374,
"step-1": "<mask token>\n\n\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\n\n\n<mask token>\n\n\ndef enroll(name, gender, age=6, city='Beijing'):\n print('name:', name)\n print('gender', gender)\n print('age', age)\n print('city', city)\n\n\n<mask token>\n\n\ndef add_end(l=None):\n if l is None:\n l = []\n l.append('END')\n return l\n\n\n<mask token>\n\n\ndef calcTwo(*numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\n\n<mask token>\n\n\ndef personThree(name, age, *args, city, job):\n print(name, age, args, city, job)\n\n\ndef personFour(name, age, *, city='beijing', job):\n print(name, age, city, job)\n\n\n<mask token>\n\n\ndef personFive(name, age, city, job):\n pass\n\n\ndef f1(a, b, c=0, *args, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)\n\n\ndef f2(a, b, c=0, *, d, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'd=', d, 'kw=', kw)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\n\n\n<mask token>\n\n\ndef quadratic(a, b, c):\n if not isinstance(a, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(b, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(c, (int, float)):\n raise TypeError('a is not a number')\n d = b * b - 4 * a * c\n if a == 0:\n if b == 0:\n if c == 0:\n return '方程根为全体实数'\n else:\n return '方程无根'\n else:\n x1 = -c / b\n x2 = x1\n return x1, x2\n elif d < 0:\n return '方程无根'\n else:\n x1 = (-b + math.sqrt(d)) / 2 / a\n x2 = (-b - math.sqrt(d)) / 2 / a\n return x1, x2\n\n\n<mask token>\n\n\ndef power(x):\n return x * x\n\n\n<mask token>\n\n\ndef powerThree(x, n=2):\n s = 1\n while n > 0:\n n = n - 1\n s = s * x\n return s\n\n\n<mask token>\n\n\ndef enroll(name, gender, age=6, city='Beijing'):\n print('name:', name)\n print('gender', gender)\n print('age', age)\n print('city', city)\n\n\n<mask token>\n\n\ndef add_end(l=None):\n if l is None:\n l = []\n l.append('END')\n return l\n\n\n<mask token>\n\n\ndef calcTwo(*numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\n\n<mask token>\n\n\ndef person(name, age, **kw):\n print('name:', name, 'age:', age, 'other:', kw)\n\n\n<mask token>\n\n\ndef person(name, age, **kw):\n if 'city' in kw:\n pass\n if 'job' in kw:\n pass\n print('name:', name, 'age:', age, 'other:', kw)\n\n\n<mask token>\n\n\ndef personThree(name, age, *args, city, job):\n print(name, age, args, city, job)\n\n\ndef personFour(name, age, *, city='beijing', job):\n print(name, age, city, job)\n\n\n<mask token>\n\n\ndef personFive(name, age, city, job):\n pass\n\n\ndef f1(a, b, c=0, *args, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)\n\n\ndef f2(a, b, c=0, *, d, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'd=', d, 'kw=', kw)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\n\n\n<mask token>\n\n\ndef quadratic(a, b, c):\n if not isinstance(a, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(b, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(c, (int, float)):\n raise TypeError('a is not a number')\n d = b * b - 4 * a * c\n if a == 0:\n if b == 0:\n if c == 0:\n return '方程根为全体实数'\n else:\n return '方程无根'\n else:\n x1 = -c / b\n x2 = x1\n return x1, x2\n elif d < 0:\n return '方程无根'\n else:\n x1 = (-b + math.sqrt(d)) / 2 / a\n x2 = (-b - math.sqrt(d)) / 2 / a\n return x1, x2\n\n\n<mask token>\n\n\ndef power(x):\n return x * x\n\n\n<mask token>\n\n\ndef powerThree(x, n=2):\n s = 1\n while n > 0:\n n = n - 1\n s = s * x\n return s\n\n\n<mask token>\n\n\ndef enroll(name, gender, age=6, city='Beijing'):\n print('name:', name)\n print('gender', gender)\n print('age', age)\n print('city', city)\n\n\n<mask token>\n\n\ndef add_end(l=None):\n if l is None:\n l = []\n l.append('END')\n return l\n\n\n<mask token>\n\n\ndef calc(numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\n\n<mask token>\n\n\ndef calcTwo(*numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\n\n<mask token>\n\n\ndef person(name, age, **kw):\n print('name:', name, 'age:', age, 'other:', kw)\n\n\n<mask token>\n\n\ndef person(name, age, **kw):\n if 'city' in kw:\n pass\n if 'job' in kw:\n pass\n print('name:', name, 'age:', age, 'other:', kw)\n\n\n<mask token>\n\n\ndef personThree(name, age, *args, city, job):\n print(name, age, args, city, job)\n\n\ndef personFour(name, age, *, city='beijing', job):\n print(name, age, city, job)\n\n\n<mask token>\n\n\ndef personFive(name, age, city, job):\n pass\n\n\ndef f1(a, b, c=0, *args, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)\n\n\ndef f2(a, b, c=0, *, d, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'd=', d, 'kw=', kw)\n\n\n<mask token>\n",
"step-4": "print(abs(100))\nprint(abs(-20))\nprint(abs(12.34))\nprint(max(1, 2))\nprint(max(1, 2, 3, -5))\nprint(int('123'))\nprint(int(12.34))\nprint(float('12.34'))\nprint(str(1.23))\nprint(str(100))\nprint(bool(1))\nprint(bool(''))\na = abs\nprint(a(-1))\nn1 = 255\nn2 = 1000\nprint(hex(255))\nprint(hex(1000))\nfrom abstest import my_abs\nprint(my_abs(-2))\n<mask token>\nimport math\n\n\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\n\n\nx, y = move(100, 100, 60, math.pi / 6)\nprint(x, y)\nr = move(100, 100, 60, math.pi / 6)\nprint(r)\n\n\ndef quadratic(a, b, c):\n if not isinstance(a, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(b, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(c, (int, float)):\n raise TypeError('a is not a number')\n d = b * b - 4 * a * c\n if a == 0:\n if b == 0:\n if c == 0:\n return '方程根为全体实数'\n else:\n return '方程无根'\n else:\n x1 = -c / b\n x2 = x1\n return x1, x2\n elif d < 0:\n return '方程无根'\n else:\n x1 = (-b + math.sqrt(d)) / 2 / a\n x2 = (-b - math.sqrt(d)) / 2 / a\n return x1, x2\n\n\nprint(quadratic(2, 3, 1))\nprint(quadratic(0, 0, 0))\n\n\ndef power(x):\n return x * x\n\n\nprint(power(4))\nprint(power(-2))\n\n\ndef powerThree(x, n=2):\n s = 1\n while n > 0:\n n = n - 1\n s = s * x\n return s\n\n\nprint(powerThree(5, 3))\nprint(powerThree(5))\n\n\ndef enroll(name, gender, age=6, city='Beijing'):\n print('name:', name)\n print('gender', gender)\n print('age', age)\n print('city', city)\n\n\nprint(enroll('Sarah', 'F'))\n\n\ndef add_end(l=None):\n if l is None:\n l = []\n l.append('END')\n return l\n\n\nprint(add_end([1, 2, 3]))\nprint(add_end())\nprint(add_end())\nprint(add_end())\n\n\ndef calc(numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\n\nprint(calc([1, 2, 3]))\nprint(calc((1, 2, 3)))\n\n\ndef calcTwo(*numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\n\nprint(calcTwo(1, 2))\nprint(calcTwo())\nnumbers = [1, 2, 3]\nprint(calcTwo(numbers[0], numbers[1], numbers[2]))\nprint(calcTwo(*numbers))\n\n\ndef person(name, age, **kw):\n print('name:', name, 'age:', age, 'other:', kw)\n\n\nperson('michael', 30)\nperson('michael', 30, city='Beijing')\nperson('michael', 30, gender='m', job='engineer')\nextra = {'city': 'Beijing', 'job': 'engineer'}\nperson('Jack', 24, city=extra['city'], job=extra['job'])\nperson('Jack', 24, **extra)\n\n\ndef person(name, age, **kw):\n if 'city' in kw:\n pass\n if 'job' in kw:\n pass\n print('name:', name, 'age:', age, 'other:', kw)\n\n\nperson('jack', 24, city='beijing', addr='chaoyang', zipcode=123456)\n\n\ndef personTwo(name, age, *, city, job):\n print(name, age, city, job)\n\n\npersonTwo('jack', 24, city='beijing', job='engineer')\n\n\ndef personThree(name, age, *args, city, job):\n print(name, age, args, city, job)\n\n\ndef personFour(name, age, *, city='beijing', job):\n print(name, age, city, job)\n\n\npersonFour('jack', 24, job='engineer')\n\n\ndef personFive(name, age, city, job):\n pass\n\n\ndef f1(a, b, c=0, *args, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)\n\n\ndef f2(a, b, c=0, *, d, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'd=', d, 'kw=', kw)\n\n\nf1(1, 2)\nf1(1, 2, 3)\nf1(1, 2, 3, 'a', 'b')\nf1(1, 2, 3, 'a', 'b', x=99)\nf2(1, 2, d=99, ext=None)\nargs = 1, 2, 3, 4\nkw = {'d': 99, 'x': '#'}\nf1(*args, **kw)\nargs = 1, 2, 3\nkw = {'d': 88, 'x': '#'}\nf2(*args, **kw)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/4/11 16:07\n# @Author : LiuZhi\n# @Site : \n# @File : Function.py\n# @Software: PyCharm\n\n#求绝对值的函数\nprint(abs(100))\nprint(abs(-20))\nprint(abs(12.34))\n\n#print(abs(1,2))\n#print(abs('q'))\n\nprint(max(1,2))\nprint(max(1,2,3,-5))\n\nprint(int('123'))\nprint(int(12.34))\nprint(float('12.34'))\nprint(str(1.23))\nprint(str(100))\nprint(bool(1))\nprint(bool(''))\n\na = abs\nprint(a(-1))\n\nn1 = 255\nn2 = 1000\nprint(hex(255))\nprint(hex(1000))\n\n\nfrom abstest import my_abs\nprint(my_abs(-2))\n#print(my_abs(-2,3))\n#print(my_abs('222'))\n'''\npass用法\ndef nop():\n pass\n\nage = 26\nif age >= 18:\n pass\n'''\nimport math\n\ndef move(x,y, step, angle = 0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx,ny\nx, y = move(100, 100, 60, math.pi/6)\nprint(x,y)\nr = move(100, 100, 60, math.pi/6)\nprint(r)\n\n#求一元二次方程的根\ndef quadratic(a, b, c):\n if not isinstance(a, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(b, (int, float)):\n raise TypeError('a is not a number')\n if not isinstance(c, (int, float)):\n raise TypeError('a is not a number')\n d = b*b - 4*a*c\n if a == 0:\n if b == 0:\n if c == 0:\n return '方程根为全体实数'\n else:\n return '方程无根'\n else:\n x1 = -c/b\n x2 = x1\n return x1, x2\n else:\n if d<0:\n return '方程无根'\n else:\n x1 = (-b + math.sqrt(d))/2/a\n x2 = (-b - math.sqrt(d))/2/a\n return x1,x2\nprint(quadratic(2, 3, 1))\nprint(quadratic(0,0,0))\n\ndef power(x):\n return x*x\n\nprint(power(4))\nprint(power(-2))\n\n#默认参数\ndef powerThree(x, n=2):\n s = 1\n while n >0 :\n n = n - 1\n s = s * x\n return s\n\nprint(powerThree(5,3))\nprint(powerThree(5))\n\ndef enroll(name, gender, age = 6, city = 'Beijing'):\n print('name:', name)\n print('gender', gender)\n print('age', age)\n print('city', city)\nprint(enroll('Sarah', 'F'))\n\ndef add_end(l=None):\n if l is None:\n l = []\n l.append('END')\n return l\nprint(add_end([1, 2, 3]))\nprint(add_end())\nprint(add_end())\nprint(add_end())\n\n#可变参数\ndef calc(numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\nprint(calc([1, 2, 3]))\nprint(calc((1, 2, 3)))\n\ndef calcTwo(*numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\n\nprint(calcTwo(1,2))\nprint(calcTwo())\nnumbers = [1, 2, 3]\nprint(calcTwo(numbers[0], numbers[1], numbers[2]))\nprint(calcTwo(*numbers))\n\n#关键字参数\ndef person(name, age, **kw):\n print('name:', name, 'age:', age, 'other:', kw)\n\nperson('michael', 30)\nperson('michael', 30, city='Beijing')\nperson('michael', 30, gender='m', job='engineer')\nextra = {'city':'Beijing', 'job': 'engineer'}\nperson('Jack', 24, city = extra['city'], job = extra['job'])\nperson('Jack', 24, **extra)\n\ndef person(name, age, **kw):\n if 'city' in kw:\n pass\n if 'job' in kw:\n pass\n print('name:', name, 'age:', age, 'other:', kw)\n\nperson('jack', 24, city='beijing', addr = 'chaoyang', zipcode=123456)\ndef personTwo(name, age, *, city, job):\n print(name, age, city, job)\npersonTwo('jack', 24, city='beijing', job='engineer')\ndef personThree(name, age, *args, city, job):\n print(name, age, args, city, job)\n#personThree('jack', 24, 'beijing', 'engineer')\ndef personFour(name, age, *, city='beijing', job):\n print(name, age, city, job)\npersonFour('jack', 24, job = 'engineer')\ndef personFive(name, age, city, job):\n pass\ndef f1(a, b, c=0, *args, **kw):\n print('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)\ndef f2(a, b, c=0, *, d, **kw):\n print('a=', a, 'b=', b, 'c=', c, 
'd=', d, 'kw=', kw)\nf1(1, 2)\nf1(1, 2, 3)\nf1(1, 2, 3, 'a', 'b')\nf1(1, 2, 3, 'a', 'b', x=99)\nf2(1, 2, d=99, ext=None)\n\nargs = (1,2, 3, 4)\nkw = {'d':99,'x':'#'}\nf1(*args, **kw)\n\nargs = (1,2, 3)\nkw = {'d':88, 'x':'#'}\nf2(*args, **kw)\n\n\n",
"step-ids": [
9,
14,
15,
19,
20
]
}
|
[
9,
14,
15,
19,
20
] |
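As a capstone to the parameter styles walked through in the record above, every kind can appear in one signature in a fixed order: required, default, *args, keyword-only, **kw. A small recap:

def f(a, b=0, *args, c, d=1, **kw):
    # a is required, b has a default, args collects extra positionals,
    # c and d are keyword-only, kw collects extra keywords.
    return a, b, args, c, d, kw

print(f(1, 2, 3, 4, c=5, x=6))  # (1, 2, (3, 4), 5, 1, {'x': 6})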
<|reserved_special_token_0|>
class Team(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Team(models.Model):
teamName = models.TextField()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Team(models.Model):
teamName = models.TextField()
return
<|reserved_special_token_1|>
from django.db import models
class Team(models.Model):
teamName = models.TextField()
return
<|reserved_special_token_1|>
# models.py- Team
from django.db import models
class Team(models.Model):
teamName = models.TextField()
#Seasons associated
#Registrants unique
return
|
flexible
|
{
"blob_id": "331b5f0a34db4d12d713439db3d2818e8c922310",
"index": 4236,
"step-1": "<mask token>\n\n\nclass Team(models.Model):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\nreturn\n",
"step-4": "from django.db import models\n\n\nclass Team(models.Model):\n teamName = models.TextField()\n\n\nreturn\n",
"step-5": "# models.py- Team\nfrom django.db import models\n\n\nclass Team(models.Model):\n \n teamName = models.TextField()\n\n #Seasons associated\n #Registrants unique\n\nreturn \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
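The record above carries a stray module-level `return` (a syntax error) and two placeholder comments. A working sketch of what those comments describe might look like this; `Season` and `Registrant` are assumed model names, not taken from the source, and only `teamName` comes from the original snippet:

from django.db import models

class Team(models.Model):
    teamName = models.TextField()
    # "Seasons associated"
    seasons = models.ManyToManyField('Season', related_name='teams')
    # "Registrants unique" -- an M2M already stores each pairing only once
    registrants = models.ManyToManyField('Registrant', related_name='teams')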
<|reserved_special_token_0|>
class GetRootData(Function):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetRootData(Function):
def __init__(self, data_display):
self.data_display = data_display
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetRootData(Function):
def __init__(self, data_display):
self.data_display = data_display
def call(self, args):
image_folder_path = args[0]
output_path = args[1]
self.data_display.clear()
data = main.generate_data(image_folder_path, self.data_display.
data_tracker)
error_message = self.data_display.display_data(data)
return ''
<|reserved_special_token_1|>
import src.engine.functions.root_analyzer.main as main
from src.engine.functions.function import Function
class GetRootData(Function):
def __init__(self, data_display):
self.data_display = data_display
def call(self, args):
image_folder_path = args[0]
output_path = args[1]
self.data_display.clear()
data = main.generate_data(image_folder_path, self.data_display.
data_tracker)
error_message = self.data_display.display_data(data)
return ''
<|reserved_special_token_1|>
import src.engine.functions.root_analyzer.main as main
from src.engine.functions.function import Function
class GetRootData(Function):
def __init__(self, data_display):
self.data_display = data_display
def call(self, args):
image_folder_path = args[0]
output_path = args[1]
self.data_display.clear()
data = main.generate_data(image_folder_path, self.data_display.data_tracker)
error_message = self.data_display.display_data(data)
return ""
|
flexible
|
{
"blob_id": "e8ea307352805bf0b5129e2ad7f7b68c44e78fc9",
"index": 9118,
"step-1": "<mask token>\n\n\nclass GetRootData(Function):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.\n data_tracker)\n error_message = self.data_display.display_data(data)\n return ''\n",
"step-4": "import src.engine.functions.root_analyzer.main as main\nfrom src.engine.functions.function import Function\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.\n data_tracker)\n error_message = self.data_display.display_data(data)\n return ''\n",
"step-5": "import src.engine.functions.root_analyzer.main as main\nfrom src.engine.functions.function import Function\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.data_tracker)\n error_message = self.data_display.display_data(data)\n return \"\"\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django import forms
class CriteriaForm(forms.Form):
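    # free-form search text captured through a multi-line textarea widget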
query = forms.CharField(widget=forms.Textarea)
|
normal
|
{
"blob_id": "b6529dc77d89cdf2d49c689dc583b78c94e31c4d",
"index": 4716,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CriteriaForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CriteriaForm(forms.Form):\n query = forms.CharField(widget=forms.Textarea)\n",
"step-4": "from django import forms\n\n\nclass CriteriaForm(forms.Form):\n query = forms.CharField(widget=forms.Textarea)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
if platform == 'darwin':
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',
'-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
<|reserved_special_token_0|>
if cython:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
extensions = cythonize(extensions)
else:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
setup(name='sent2vec', version='0.1.0', author='', author_email='', url='',
description='A Python interface for sent2vec library', license=
'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,
install_requires=[], classifiers=[])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cython = True
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
if platform == 'darwin':
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',
'-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
extensions = []
if cython:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
extensions = cythonize(extensions)
else:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
setup(name='sent2vec', version='0.1.0', author='', author_email='', url='',
description='A Python interface for sent2vec library', license=
'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,
install_requires=[], classifiers=[])
<|reserved_special_token_1|>
from setuptools import setup, find_packages
from setuptools.extension import Extension
from sys import platform
cython = True
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
if platform == 'darwin':
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',
'-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
extensions = []
if cython:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
extensions = cythonize(extensions)
else:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
setup(name='sent2vec', version='0.1.0', author='', author_email='', url='',
description='A Python interface for sent2vec library', license=
'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,
install_requires=[], classifiers=[])
<|reserved_special_token_1|>
from setuptools import setup, find_packages
from setuptools.extension import Extension
from sys import platform
cython = True
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
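# When Cython is available we compile sent2vec.pyx directly; otherwise we
# fall back to the pre-generated sent2vec.cpp so the extension still builds.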
# Define the C++ extension
if platform == "darwin":
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x', '-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
extensions = []
if cython:
extensions = [
Extension('sent2vec',
sources=[
'sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc',
'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc',
'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc',
'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc',
'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'
],
language='c++',
extra_compile_args=extra_compile_args
)
]
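    # cythonize translates the .pyx source into C++ before compilation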
extensions = cythonize(extensions)
else:
extensions = [
Extension('sent2vec',
sources=[
'sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc',
'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc',
'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc',
'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc',
'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'
],
language='c++',
extra_compile_args=extra_compile_args
)
]
# Package details
setup(
name='sent2vec',
version='0.1.0',
author='',
author_email='',
url='',
description='A Python interface for sent2vec library',
license='BSD 3-Clause License',
packages=['sent2vec'],
ext_modules = extensions,
install_requires=[],
classifiers= []
)
|
flexible
|
{
"blob_id": "312cc666c88fcd22882c49598db8c5e18bd3dae1",
"index": 26,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\nif platform == 'darwin':\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',\n '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\n<mask token>\nif cython:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\n extensions = cythonize(extensions)\nelse:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\nsetup(name='sent2vec', version='0.1.0', author='', author_email='', url='',\n description='A Python interface for sent2vec library', license=\n 'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,\n install_requires=[], classifiers=[])\n",
"step-3": "<mask token>\ncython = True\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\nif platform == 'darwin':\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',\n '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\nextensions = []\nif cython:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\n extensions = cythonize(extensions)\nelse:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\nsetup(name='sent2vec', version='0.1.0', author='', author_email='', url='',\n description='A Python interface for sent2vec library', license=\n 'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,\n install_requires=[], classifiers=[])\n",
"step-4": "from setuptools import setup, find_packages\nfrom setuptools.extension import Extension\nfrom sys import platform\ncython = True\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\nif platform == 'darwin':\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',\n '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\nextensions = []\nif cython:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\n extensions = cythonize(extensions)\nelse:\n extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=\n extra_compile_args)]\nsetup(name='sent2vec', version='0.1.0', author='', author_email='', url='',\n description='A Python interface for sent2vec library', license=\n 'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,\n install_requires=[], classifiers=[])\n",
"step-5": "from setuptools import setup, find_packages\nfrom setuptools.extension import Extension\nfrom sys import platform\n\ncython = True\n\ntry:\n from Cython.Build import cythonize\n cython = True\nexcept ImportError:\n cython = False\n\n# Define the C++ extension\nif platform == \"darwin\":\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x', '-stdlib=libc++', '-mmacosx-version-min=10.7']\nelse:\n extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']\n\nextensions = []\n\nif cython:\n extensions = [\n Extension('sent2vec',\n sources=[\n 'sent2vec/sent2vec.pyx',\n 'sent2vec/cpp/src/args.cc',\n 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc',\n 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc',\n 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc',\n 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'\n ],\n language='c++',\n extra_compile_args=extra_compile_args\n )\n ]\n\n extensions = cythonize(extensions)\nelse:\n extensions = [\n Extension('sent2vec',\n sources=[\n 'sent2vec/sent2vec.cpp',\n 'sent2vec/cpp/src/args.cc',\n 'sent2vec/cpp/src/dictionary.cc',\n 'sent2vec/cpp/src/fasttext.cc',\n 'sent2vec/cpp/src/main.cc',\n 'sent2vec/cpp/src/matrix.cc',\n 'sent2vec/cpp/src/model.cc',\n 'sent2vec/cpp/src/productquantizer.cc',\n 'sent2vec/cpp/src/qmatrix.cc',\n 'sent2vec/cpp/src/utils.cc',\n 'sent2vec/cpp/src/vector.cc'\n ],\n language='c++',\n extra_compile_args=extra_compile_args\n )\n ]\n\n# Package details\nsetup(\n name='sent2vec',\n version='0.1.0',\n author='',\n author_email='',\n url='',\n description='A Python interface for sent2vec library',\n license='BSD 3-Clause License',\n packages=['sent2vec'],\n ext_modules = extensions,\n install_requires=[],\n classifiers= []\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'ApplicationCredential',
'ApplicationTag',
]
@pulumi.output_type
class ApplicationCredential(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "credentialType":
suggest = "credential_type"
elif key == "databaseName":
suggest = "database_name"
elif key == "secretId":
suggest = "secret_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApplicationCredential.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApplicationCredential.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
credential_type: Optional['ApplicationCredentialCredentialType'] = None,
database_name: Optional[str] = None,
secret_id: Optional[str] = None):
if credential_type is not None:
pulumi.set(__self__, "credential_type", credential_type)
if database_name is not None:
pulumi.set(__self__, "database_name", database_name)
if secret_id is not None:
pulumi.set(__self__, "secret_id", secret_id)
@property
@pulumi.getter(name="credentialType")
def credential_type(self) -> Optional['ApplicationCredentialCredentialType']:
return pulumi.get(self, "credential_type")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> Optional[str]:
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> Optional[str]:
return pulumi.get(self, "secret_id")
@pulumi.output_type
class ApplicationTag(dict):
"""
A key-value pair to associate with a resource.
"""
def __init__(__self__, *,
key: str,
value: str):
"""
A key-value pair to associate with a resource.
:param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
|
normal
|
{
"blob_id": "8535020e7157699310b3412fe6c5a28ee8e61f49",
"index": 6911,
"step-1": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n <mask token>\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n <mask token>\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-2": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n <mask token>\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-3": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default=None) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-4": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n\n @staticmethod\n def __key_warning(key: str):\n suggest = None\n if key == 'credentialType':\n suggest = 'credential_type'\n elif key == 'databaseName':\n suggest = 'database_name'\n elif key == 'secretId':\n suggest = 'secret_id'\n if suggest:\n pulumi.log.warn(\n f\"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.\"\n )\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default=None) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-5": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi SDK Generator. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom .. import _utilities\nfrom ._enums import *\n\n__all__ = [\n 'ApplicationCredential',\n 'ApplicationTag',\n]\n\[email protected]_type\nclass ApplicationCredential(dict):\n @staticmethod\n def __key_warning(key: str):\n suggest = None\n if key == \"credentialType\":\n suggest = \"credential_type\"\n elif key == \"databaseName\":\n suggest = \"database_name\"\n elif key == \"secretId\":\n suggest = \"secret_id\"\n\n if suggest:\n pulumi.log.warn(f\"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.\")\n\n def __getitem__(self, key: str) -> Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default = None) -> Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *,\n credential_type: Optional['ApplicationCredentialCredentialType'] = None,\n database_name: Optional[str] = None,\n secret_id: Optional[str] = None):\n if credential_type is not None:\n pulumi.set(__self__, \"credential_type\", credential_type)\n if database_name is not None:\n pulumi.set(__self__, \"database_name\", database_name)\n if secret_id is not None:\n pulumi.set(__self__, \"secret_id\", secret_id)\n\n @property\n @pulumi.getter(name=\"credentialType\")\n def credential_type(self) -> Optional['ApplicationCredentialCredentialType']:\n return pulumi.get(self, \"credential_type\")\n\n @property\n @pulumi.getter(name=\"databaseName\")\n def database_name(self) -> Optional[str]:\n return pulumi.get(self, \"database_name\")\n\n @property\n @pulumi.getter(name=\"secretId\")\n def secret_id(self) -> Optional[str]:\n return pulumi.get(self, \"secret_id\")\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n def __init__(__self__, *,\n key: str,\n value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)\n\n @property\n @pulumi.getter\n def key(self) -> str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, \"key\")\n\n @property\n @pulumi.getter\n def value(self) -> str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. 
\n \"\"\"\n return pulumi.get(self, \"value\")\n\n\n",
"step-ids": [
10,
11,
12,
13,
16
]
}
|
[
10,
11,
12,
13,
16
] |
#H##############################################################
# FILENAME : rec.py
#
# DESCRIPTION :
# Classifies text using defined regular expressions
#
# PUBLIC FUNCTIONS :
# int processToken( string )
#
# NOTES :
# This function uses specific critera to classify
# Criteria described in README.md
#
# Copyright 2018, Jacob Wilkins. All rights reserved.
#
# AUTHOR : Jacob Wilkins START DATE : 24 Sep 18
#
#H#
import re
import sys
def processToken(token) :
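    # ID: $, |, or one lowercase letter followed by capitals/digits, or a
    # |...|-delimited token; INT: % plus hex digits; FP: digits.digits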
idPattern1 = re.compile(r'^([$]|[|]|[a-z])[A-Z0-9]*$')
idPattern2 = re.compile(r'^([|][A-Z0-9]*[|])$')
intPattern = re.compile(r'^(%)([0-9]|[A-Fa-f])+$')
fpPattern = re.compile(r'^[0-9]+[.][0-9]+$')
idMatch1 = idPattern1.match(token)
idMatch2 = idPattern2.match(token)
intMatch = intPattern.match(token)
fpMatch = fpPattern.match(token)
if idMatch1:
print('>%s< matches ID' % (token))
elif idMatch2:
print('>%s< matches ID' % (token))
elif intMatch:
print('>%s< matches INT' % (token))
elif fpMatch:
print('>%s< matches FP' % (token))
else:
print('>%s< does not match' % (token))
def main() :
fName = sys.argv[1]
print('processing tokens from ' + fName + ' ...')
with open(fName, 'r') as fp :
lines = fp.read().replace('\r', '').split('\n')
for line in lines :
for token in line.split() :
processToken(token)
if (__name__ == '__main__') :
main()
|
normal
|
{
"blob_id": "6d543e9e24debaff7640006a3836c59ec0096255",
"index": 5205,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef processToken(token):\n idPattern1 = re.compile('^([$]|[|]|[a-z])[A-Z0-9]*$')\n idPattern2 = re.compile('^([|][A-Z0-9]*[|])$')\n intPattern = re.compile('^(%)([0-9]|[A-Fa-f])+$')\n fpPattern = re.compile('^[0-9]+[.][0-9]+$')\n idMatch1 = idPattern1.match(token)\n idMatch2 = idPattern2.match(token)\n intMatch = intPattern.match(token)\n fpMatch = fpPattern.match(token)\n if idMatch1:\n print('>%s< matches ID' % token)\n elif idMatch2:\n print('>%s< matches ID' % token)\n elif intMatch:\n print('>%s< matches INT' % token)\n elif fpMatch:\n print('>%s< matches FP' % token)\n else:\n print('>%s< does not match' % token)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef processToken(token):\n idPattern1 = re.compile('^([$]|[|]|[a-z])[A-Z0-9]*$')\n idPattern2 = re.compile('^([|][A-Z0-9]*[|])$')\n intPattern = re.compile('^(%)([0-9]|[A-Fa-f])+$')\n fpPattern = re.compile('^[0-9]+[.][0-9]+$')\n idMatch1 = idPattern1.match(token)\n idMatch2 = idPattern2.match(token)\n intMatch = intPattern.match(token)\n fpMatch = fpPattern.match(token)\n if idMatch1:\n print('>%s< matches ID' % token)\n elif idMatch2:\n print('>%s< matches ID' % token)\n elif intMatch:\n print('>%s< matches INT' % token)\n elif fpMatch:\n print('>%s< matches FP' % token)\n else:\n print('>%s< does not match' % token)\n\n\ndef main():\n fName = sys.argv[1]\n print('processing tokens from ' + fName + ' ...')\n with open(fName, 'r') as fp:\n lines = fp.read().replace('\\r', '').split('\\n')\n for line in lines:\n for token in line.split():\n processToken(token)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import re\nimport sys\n\n\ndef processToken(token):\n idPattern1 = re.compile('^([$]|[|]|[a-z])[A-Z0-9]*$')\n idPattern2 = re.compile('^([|][A-Z0-9]*[|])$')\n intPattern = re.compile('^(%)([0-9]|[A-Fa-f])+$')\n fpPattern = re.compile('^[0-9]+[.][0-9]+$')\n idMatch1 = idPattern1.match(token)\n idMatch2 = idPattern2.match(token)\n intMatch = intPattern.match(token)\n fpMatch = fpPattern.match(token)\n if idMatch1:\n print('>%s< matches ID' % token)\n elif idMatch2:\n print('>%s< matches ID' % token)\n elif intMatch:\n print('>%s< matches INT' % token)\n elif fpMatch:\n print('>%s< matches FP' % token)\n else:\n print('>%s< does not match' % token)\n\n\ndef main():\n fName = sys.argv[1]\n print('processing tokens from ' + fName + ' ...')\n with open(fName, 'r') as fp:\n lines = fp.read().replace('\\r', '').split('\\n')\n for line in lines:\n for token in line.split():\n processToken(token)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#H##############################################################\n# FILENAME : rec.py\n#\n# DESCRIPTION :\n# Classifies text using defined regular expressions \n#\n# PUBLIC FUNCTIONS :\n# int processToken( string )\n#\n# NOTES :\n# This function uses specific critera to classify\n# Criteria described in README.md\n#\n# Copyright 2018, Jacob Wilkins. All rights reserved.\n# \n# AUTHOR : Jacob Wilkins START DATE : 24 Sep 18\n#\n#H#\n\nimport re\nimport sys\n\ndef processToken(token) :\n\n idPattern1 = re.compile(r'^([$]|[|]|[a-z])[A-Z0-9]*$')\n idPattern2 = re.compile(r'^([|][A-Z0-9]*[|])$')\n intPattern = re.compile(r'^(%)([0-9]|[A-Fa-f])+$')\n fpPattern = re.compile(r'^[0-9]+[.][0-9]+$')\n\n idMatch1 = idPattern1.match(token)\n idMatch2 = idPattern2.match(token)\n intMatch = intPattern.match(token)\n fpMatch = fpPattern.match(token)\n\n if idMatch1:\n print('>%s< matches ID' % (token))\n elif idMatch2:\n print('>%s< matches ID' % (token))\n elif intMatch:\n print('>%s< matches INT' % (token))\n elif fpMatch:\n print('>%s< matches FP' % (token))\n else:\n print('>%s< does not match' % (token))\n\ndef main() :\n fName = sys.argv[1]\n print('processing tokens from ' + fName + ' ...')\n\n with open(fName, 'r') as fp :\n lines = fp.read().replace('\\r', '').split('\\n')\n\n for line in lines :\n for token in line.split() :\n processToken(token)\n\nif (__name__ == '__main__') :\n main()\n",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
# -*- coding: utf-8 -*-
from django.db import models
from backend.models.account import Account
from string import Template
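# display template reused by GroupParticipation.__str__ below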
out = Template("$account: $parts")
class Group(models.Model):
name = models.CharField(max_length=100)
class GroupParticipation(models.Model):
account = models.ForeignKey(Account, related_name='groups')
parts = models.FloatField(default=1.0)
group = models.ForeignKey(Group, related_name='participants')
def __str__(self):
return out.substitute(account=self.account, parts=self.parts)
class Meta:
unique_together = ('account', 'parts', 'group')
|
normal
|
{
"blob_id": "11337f6f9cf22ba6fbed68dfcb7a07fb6368e94e",
"index": 6350,
"step-1": "<mask token>\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-2": "<mask token>\n\n\nclass Group(models.Model):\n <mask token>\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-3": "<mask token>\nout = Template('$account: $parts')\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-4": "from django.db import models\nfrom backend.models.account import Account\nfrom string import Template\nout = Template('$account: $parts')\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\nfrom backend.models.account import Account\nfrom string import Template\n\n\nout = Template(\"$account: $parts\")\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n class Meta:\n unique_together = ('account', 'parts', 'group')\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cPickle.dump(v, open(PROJECT + 'db/dictvectorizer.pickle', 'wb'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
videos, users, reviews = load_data()
orig_X = np.array([(x['date'], x['text'], x['user']) for x in reviews])
feats = create_features(orig_X, None)
v = DictVectorizer(sparse=False)
feats = v.fit_transform(feats)
cPickle.dump(v, open(PROJECT + 'db/dictvectorizer.pickle', 'wb'))
<|reserved_special_token_1|>
import numpy as np
import cPickle
from features import create_features, PROJECT
from parse import load_data
from dict_vectorizer import DictVectorizer
videos, users, reviews = load_data()
orig_X = np.array([(x['date'], x['text'], x['user']) for x in reviews])
feats = create_features(orig_X, None)
v = DictVectorizer(sparse=False)
feats = v.fit_transform(feats)
cPickle.dump(v, open(PROJECT + 'db/dictvectorizer.pickle', 'wb'))
<|reserved_special_token_1|>
import numpy as np
import cPickle
from features import create_features, PROJECT
from parse import load_data
from dict_vectorizer import DictVectorizer
videos, users, reviews = load_data()
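# each review contributes a (date, text, user) tuple for feature extraction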
orig_X = np.array([(x['date'], x['text'], x['user']) for x in reviews])
feats = create_features(orig_X, None)
v = DictVectorizer(sparse=False)
feats = v.fit_transform(feats)
# feats is now in vectorized format
# v.transform() is the transformation that needs to be used on test data
cPickle.dump(v, open(PROJECT + "db/dictvectorizer.pickle", "wb"))
|
flexible
|
{
"blob_id": "e26fa69ea1f0bee82b4108ac5a541a6175645728",
"index": 5955,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncPickle.dump(v, open(PROJECT + 'db/dictvectorizer.pickle', 'wb'))\n",
"step-3": "<mask token>\nvideos, users, reviews = load_data()\norig_X = np.array([(x['date'], x['text'], x['user']) for x in reviews])\nfeats = create_features(orig_X, None)\nv = DictVectorizer(sparse=False)\nfeats = v.fit_transform(feats)\ncPickle.dump(v, open(PROJECT + 'db/dictvectorizer.pickle', 'wb'))\n",
"step-4": "import numpy as np\nimport cPickle\nfrom features import create_features, PROJECT\nfrom parse import load_data\nfrom dict_vectorizer import DictVectorizer\nvideos, users, reviews = load_data()\norig_X = np.array([(x['date'], x['text'], x['user']) for x in reviews])\nfeats = create_features(orig_X, None)\nv = DictVectorizer(sparse=False)\nfeats = v.fit_transform(feats)\ncPickle.dump(v, open(PROJECT + 'db/dictvectorizer.pickle', 'wb'))\n",
"step-5": "import numpy as np\nimport cPickle\n\nfrom features import create_features, PROJECT\nfrom parse import load_data\nfrom dict_vectorizer import DictVectorizer\n\nvideos, users, reviews = load_data()\norig_X = np.array([(x['date'], x['text'], x['user']) for x in reviews])\nfeats = create_features(orig_X, None)\nv = DictVectorizer(sparse=False)\nfeats = v.fit_transform(feats)\n\n# feats is now in vectorized format\n# v.transform() is the transformation that needs to be used on test data\n\ncPickle.dump(v, open(PROJECT + \"db/dictvectorizer.pickle\", \"wb\"))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def submissions_to_user_submission_activities_dfs(submissions_df: DataFrame
) ->Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert 'submissionHistory' in submissions_df.columns
assert 'id' in submissions_df.columns
assert 'courseId' in submissions_df.columns
assert 'courseWorkId' in submissions_df.columns
submissions_df['submissionHistory'] = submissions_df['submissionHistory'
].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',
'courseWorkId']].agg('-'.join, axis=1)
submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',
'submissionHistory', 'AssignmentIdentifier', 'CreateDate',
'LastModifiedDate']]
history_df = submissions_df.explode(column='submissionHistory')
history_df = history_df['submissionHistory'].apply(Series).merge(history_df
, left_index=True, right_index=True, how='outer')
history_df.drop(columns=['submissionHistory'], inplace=True)
user_submission_df = concat([history_df, history_df['stateHistory'].
apply(Series)], axis=1)
user_submission_df.dropna(subset=['stateHistory'], inplace=True)
user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[
'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,
axis=1)
user_submission_df = user_submission_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',
'actorUserId', 'CreateDate', 'LastModifiedDate']]
user_submission_df = user_submission_df.rename(columns={
'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',
'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}
)
user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE
if 'gradeHistory' in history_df:
grade_history_df = concat([history_df, history_df['gradeHistory'].
apply(Series)], axis=1)
grade_history_df.dropna(subset=['gradeHistory'], inplace=True)
grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[
'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.
join, axis=1)
grade_history_df = grade_history_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]
grade_history_df = grade_history_df.rename(columns={
'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':
'ActivityStatus', 'courseId': 'LMSSectionIdentifier',
'actorUserId': 'LMSUserIdentifier'})
grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE
user_submission_df = user_submission_df.append(grade_history_df)
user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],
inplace=True)
user_submission_df['ActivityTimeInMinutes'] = ''
user_submission_df['Content'] = ''
user_submission_df['SourceSystem'] = SOURCE_SYSTEM
user_submission_df['SourceCreateDate'] = ''
user_submission_df['SourceLastModifiedDate'] = ''
result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([
'LMSSectionIdentifier'])))
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ACTIVITY_TYPE_STATE = 'Submission State Change'
ACTIVITY_TYPE_GRADE = 'Submission Grade Change'
def submissions_to_user_submission_activities_dfs(submissions_df: DataFrame
) ->Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert 'submissionHistory' in submissions_df.columns
assert 'id' in submissions_df.columns
assert 'courseId' in submissions_df.columns
assert 'courseWorkId' in submissions_df.columns
submissions_df['submissionHistory'] = submissions_df['submissionHistory'
].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',
'courseWorkId']].agg('-'.join, axis=1)
submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',
'submissionHistory', 'AssignmentIdentifier', 'CreateDate',
'LastModifiedDate']]
history_df = submissions_df.explode(column='submissionHistory')
history_df = history_df['submissionHistory'].apply(Series).merge(history_df
, left_index=True, right_index=True, how='outer')
history_df.drop(columns=['submissionHistory'], inplace=True)
user_submission_df = concat([history_df, history_df['stateHistory'].
apply(Series)], axis=1)
user_submission_df.dropna(subset=['stateHistory'], inplace=True)
user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[
'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,
axis=1)
user_submission_df = user_submission_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',
'actorUserId', 'CreateDate', 'LastModifiedDate']]
user_submission_df = user_submission_df.rename(columns={
'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',
'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}
)
user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE
if 'gradeHistory' in history_df:
grade_history_df = concat([history_df, history_df['gradeHistory'].
apply(Series)], axis=1)
grade_history_df.dropna(subset=['gradeHistory'], inplace=True)
grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[
'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.
join, axis=1)
grade_history_df = grade_history_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]
grade_history_df = grade_history_df.rename(columns={
'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':
'ActivityStatus', 'courseId': 'LMSSectionIdentifier',
'actorUserId': 'LMSUserIdentifier'})
grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE
user_submission_df = user_submission_df.append(grade_history_df)
user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],
inplace=True)
user_submission_df['ActivityTimeInMinutes'] = ''
user_submission_df['Content'] = ''
user_submission_df['SourceSystem'] = SOURCE_SYSTEM
user_submission_df['SourceCreateDate'] = ''
user_submission_df['SourceLastModifiedDate'] = ''
result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([
'LMSSectionIdentifier'])))
return result
<|reserved_special_token_1|>
import json
from typing import Dict
from pandas import DataFrame, concat, Series
from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM
ACTIVITY_TYPE_STATE = 'Submission State Change'
ACTIVITY_TYPE_GRADE = 'Submission Grade Change'
def submissions_to_user_submission_activities_dfs(submissions_df: DataFrame
) ->Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert 'submissionHistory' in submissions_df.columns
assert 'id' in submissions_df.columns
assert 'courseId' in submissions_df.columns
assert 'courseWorkId' in submissions_df.columns
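    # submissionHistory arrives as a JSON-like string with single quotes;
    # normalize the quoting before parsing it into a list of dicts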
submissions_df['submissionHistory'] = submissions_df['submissionHistory'
].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',
'courseWorkId']].agg('-'.join, axis=1)
submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',
'submissionHistory', 'AssignmentIdentifier', 'CreateDate',
'LastModifiedDate']]
history_df = submissions_df.explode(column='submissionHistory')
history_df = history_df['submissionHistory'].apply(Series).merge(history_df
, left_index=True, right_index=True, how='outer')
history_df.drop(columns=['submissionHistory'], inplace=True)
user_submission_df = concat([history_df, history_df['stateHistory'].
apply(Series)], axis=1)
user_submission_df.dropna(subset=['stateHistory'], inplace=True)
user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[
'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,
axis=1)
user_submission_df = user_submission_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',
'actorUserId', 'CreateDate', 'LastModifiedDate']]
user_submission_df = user_submission_df.rename(columns={
'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',
'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}
)
user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE
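    # gradeHistory is optional; when present, emit grade-change activities too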
if 'gradeHistory' in history_df:
grade_history_df = concat([history_df, history_df['gradeHistory'].
apply(Series)], axis=1)
grade_history_df.dropna(subset=['gradeHistory'], inplace=True)
grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[
'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.
join, axis=1)
grade_history_df = grade_history_df[['SourceSystemIdentifier',
'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]
grade_history_df = grade_history_df.rename(columns={
'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':
'ActivityStatus', 'courseId': 'LMSSectionIdentifier',
'actorUserId': 'LMSUserIdentifier'})
grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE
user_submission_df = user_submission_df.append(grade_history_df)
user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],
inplace=True)
user_submission_df['ActivityTimeInMinutes'] = ''
user_submission_df['Content'] = ''
user_submission_df['SourceSystem'] = SOURCE_SYSTEM
user_submission_df['SourceCreateDate'] = ''
user_submission_df['SourceLastModifiedDate'] = ''
result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([
'LMSSectionIdentifier'])))
return result
<|reserved_special_token_1|>
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import json
from typing import Dict
from pandas import DataFrame, concat, Series
from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM
ACTIVITY_TYPE_STATE = "Submission State Change"
ACTIVITY_TYPE_GRADE = "Submission Grade Change"
def submissions_to_user_submission_activities_dfs(
submissions_df: DataFrame,
) -> Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert "submissionHistory" in submissions_df.columns
assert "id" in submissions_df.columns
assert "courseId" in submissions_df.columns
assert "courseWorkId" in submissions_df.columns
# convert json-like submissionHistory string to list of dicts
submissions_df["submissionHistory"] = submissions_df["submissionHistory"].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df["AssignmentIdentifier"] = submissions_df[
["courseId", "courseWorkId"]
].agg("-".join, axis=1)
submissions_df = submissions_df[["id", "courseId", "courseWorkId", "submissionHistory", "AssignmentIdentifier", "CreateDate", "LastModifiedDate"]]
# explode submissionHistory lists into rows with other columns duplicated
history_df = submissions_df.explode(column="submissionHistory") # type: ignore
# expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns
history_df = history_df["submissionHistory"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')
history_df.drop(columns=["submissionHistory"], inplace=True)
# expand stateHistory (can assume exists, should always have at least one "CREATED" entry)
user_submission_df = concat([history_df, history_df["stateHistory"].apply(Series)], axis=1)
user_submission_df.dropna(subset=["stateHistory"], inplace=True)
# enrich stateHistory
user_submission_df["SourceSystemIdentifier"] = "S-" + user_submission_df[
["courseId", "courseWorkId", "id", "stateTimestamp"]
].agg("-".join, axis=1)
user_submission_df = user_submission_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"stateTimestamp",
"state",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
user_submission_df = user_submission_df.rename(
columns={
"stateTimestamp": "ActivityDateTime",
"state": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
user_submission_df["ActivityType"] = ACTIVITY_TYPE_STATE
# expand gradeHistory if exists
if "gradeHistory" in history_df:
grade_history_df = concat([history_df, history_df["gradeHistory"].apply(Series)], axis=1)
grade_history_df.dropna(subset=["gradeHistory"], inplace=True)
# enrich gradeHistory
grade_history_df["SourceSystemIdentifier"] = "G-" + grade_history_df[
["courseId", "courseWorkId", "id", "gradeTimestamp"]
].agg("-".join, axis=1)
grade_history_df = grade_history_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"gradeTimestamp",
"gradeChangeType",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
grade_history_df = grade_history_df.rename(
columns={
"gradeTimestamp": "ActivityDateTime",
"gradeChangeType": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
grade_history_df["ActivityType"] = ACTIVITY_TYPE_GRADE
# combine with stateHistory
user_submission_df = user_submission_df.append(grade_history_df)
# teacher actions can show up on student histories and vice-versa
user_submission_df.drop_duplicates(subset=["SourceSystemIdentifier"], inplace=True)
# finish with common columns
user_submission_df["ActivityTimeInMinutes"] = ""
user_submission_df["Content"] = ""
user_submission_df["SourceSystem"] = SOURCE_SYSTEM
user_submission_df["SourceCreateDate"] = "" # No create date available from API
user_submission_df["SourceLastModifiedDate"] = "" # No modified date available from API
# group by section id as a Dict of DataFrames
result: Dict[str, DataFrame] = dict(
tuple(user_submission_df.groupby(["LMSSectionIdentifier"]))
)
return result
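
# Usage sketch (illustrative, not part of the original module): a one-row
# DataFrame shaped like the Submission API input; every id below is made up.
if __name__ == "__main__":
    sample = DataFrame(
        {
            "id": ["sub-1"],
            "courseId": ["course-1"],
            "courseWorkId": ["work-1"],
            "submissionHistory": [
                "[{'stateHistory': {'state': 'CREATED', 'stateTimestamp': '2020-01-01T00:00:00Z', 'actorUserId': 'user-1'}}]"
            ],
            "CreateDate": ["2020-01-01"],
            "LastModifiedDate": ["2020-01-01"],
        }
    )
    by_section = submissions_to_user_submission_activities_dfs(sample)
    print(list(by_section.keys()))  # one entry per LMSSectionIdentifier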
|
flexible
|
{
"blob_id": "d6a760774b45454c959c2932d7b28deee7f81872",
"index": 318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = 
grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-3": "<mask token>\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-4": "import json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n 
join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-5": "# SPDX-License-Identifier: Apache-2.0\n# Licensed to the Ed-Fi Alliance under one or more agreements.\n# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.\n# See the LICENSE and NOTICES files in the project root for more information.\n\nimport json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\n\nACTIVITY_TYPE_STATE = \"Submission State Change\"\nACTIVITY_TYPE_GRADE = \"Submission Grade Change\"\n\n\ndef submissions_to_user_submission_activities_dfs(\n submissions_df: DataFrame,\n) -> Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert \"submissionHistory\" in submissions_df.columns\n assert \"id\" in submissions_df.columns\n assert \"courseId\" in submissions_df.columns\n assert \"courseWorkId\" in submissions_df.columns\n\n # convert json-like submissionHistory string to list of dicts\n submissions_df[\"submissionHistory\"] = submissions_df[\"submissionHistory\"].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df[\"AssignmentIdentifier\"] = submissions_df[\n [\"courseId\", \"courseWorkId\"]\n ].agg(\"-\".join, axis=1)\n\n submissions_df = submissions_df[[\"id\", \"courseId\", \"courseWorkId\", \"submissionHistory\", \"AssignmentIdentifier\", \"CreateDate\", \"LastModifiedDate\"]]\n\n # explode submissionHistory lists into rows with other columns duplicated\n history_df = submissions_df.explode(column=\"submissionHistory\") # type: ignore\n\n # expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns\n history_df = history_df[\"submissionHistory\"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')\n history_df.drop(columns=[\"submissionHistory\"], inplace=True)\n\n # expand stateHistory (can assume exists, should always have at least one \"CREATED\" entry)\n user_submission_df = concat([history_df, history_df[\"stateHistory\"].apply(Series)], axis=1)\n user_submission_df.dropna(subset=[\"stateHistory\"], inplace=True)\n\n # enrich stateHistory\n user_submission_df[\"SourceSystemIdentifier\"] = \"S-\" + user_submission_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"stateTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n user_submission_df = user_submission_df[\n [\n 
\"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"stateTimestamp\",\n \"state\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n user_submission_df = user_submission_df.rename(\n columns={\n \"stateTimestamp\": \"ActivityDateTime\",\n \"state\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n user_submission_df[\"ActivityType\"] = ACTIVITY_TYPE_STATE\n\n # expand gradeHistory if exists\n if \"gradeHistory\" in history_df:\n grade_history_df = concat([history_df, history_df[\"gradeHistory\"].apply(Series)], axis=1)\n grade_history_df.dropna(subset=[\"gradeHistory\"], inplace=True)\n\n # enrich gradeHistory\n grade_history_df[\"SourceSystemIdentifier\"] = \"G-\" + grade_history_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"gradeTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n grade_history_df = grade_history_df[\n [\n \"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"gradeTimestamp\",\n \"gradeChangeType\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n grade_history_df = grade_history_df.rename(\n columns={\n \"gradeTimestamp\": \"ActivityDateTime\",\n \"gradeChangeType\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n grade_history_df[\"ActivityType\"] = ACTIVITY_TYPE_GRADE\n\n # combine with stateHistory\n user_submission_df = user_submission_df.append(grade_history_df)\n\n # teacher actions can show up on student histories and vice-versa\n user_submission_df.drop_duplicates(subset=[\"SourceSystemIdentifier\"], inplace=True)\n\n # finish with common columns\n user_submission_df[\"ActivityTimeInMinutes\"] = \"\"\n user_submission_df[\"Content\"] = \"\"\n user_submission_df[\"SourceSystem\"] = SOURCE_SYSTEM\n user_submission_df[\"SourceCreateDate\"] = \"\" # No create date available from API\n user_submission_df[\"SourceLastModifiedDate\"] = \"\" # No modified date available from API\n\n # group by section id as a Dict of DataFrames\n result: Dict[str, DataFrame] = dict(\n tuple(user_submission_df.groupby([\"LMSSectionIdentifier\"]))\n )\n\n return result\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# The following code causes an infinite loop. Can you figure out what’s missing and how to fix it?
# def print_range(start, end):
# # Loop through the numbers from start to end
# n = start
# while n <= end:
# print(n)
# print_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line)
# Solution
# Variable n's value is never incremented, so the condition n <= end stays
# true forever. Incrementing n inside the loop fixes it:

def print_range(start, end):
    # Loop through the numbers from start to end
    n = start
    while n <= end:
        print(n)
        n += 1  # increment n so the loop can terminate

print_range(1, 5)  # Should print 1 2 3 4 5 (each number on its own line)
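# Trace for print_range(1, 5): n takes 1, 2, 3, 4, 5; once n becomes 6 the
# condition n <= end is false and the loop terminates - the increment is
# exactly what the buggy version was missing.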
|
normal
|
{
"blob_id": "05454cc6c9961aa5e0de6979bb546342f5bd7b79",
"index": 3321,
"step-1": "# The following code causes an infinite loop. Can you figure out what’s missing and how to fix it?\n\n# def print_range(start, end):\n# \t# Loop through the numbers from start to end\n# \tn = start\n# \twhile n <= end:\n# \t\tprint(n)\n\n# print_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line) \n\n# Solution\n# Variable n's value is not being incremented. We need to increment the value.\n# Here is the example\n\n\ndef print_range(start, end):\n\t# Loop through the numbers from start to end\n\tn = start\n \n\twhile n <= end:\n\t\tprint(n)\n n+=1 \n\nprint_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line) ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from typing import List, Optional
from backend.domain.well import FacilityState, Well
from backend.repository.persistence.well import WellPersistenceSchema
class WellRepository:
schema = WellPersistenceSchema()
def __init__(self, db):
self._db = db
def list(self) -> List[Well]:
return [self.schema.load(doc) for doc in self._db.wells.find({})]
def save_many(self, wells: List[Well]):
self._db.wells.insert_many([self.schema.dump(well) for well in wells])
def filter_by_facility_status(self, statuses: List[FacilityState]) -> List[Well]:
return [
self.schema.load(doc)
for doc in self._db.wells.find({"facility.lifecycle.name": {"$in": [status.value for status in statuses]}})
]
def find_well_by_facility_id(self, identifier: str) -> Optional[Well]:
doc = self._db.wells.find_one({"facility.id": identifier})
if doc:
return self.schema.load(doc)
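
# Usage sketch (assumption: a pymongo Database handle; the connection string,
# database name, and facility id below are illustrative, not from this project).
if __name__ == "__main__":
    from pymongo import MongoClient

    db = MongoClient("mongodb://localhost:27017")["backend"]
    repo = WellRepository(db)
    all_wells = repo.list()  # every stored document, deserialized through the schema
    one = repo.find_well_by_facility_id("facility-123")  # returns None if absent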
|
normal
|
{
"blob_id": "5a181b0c22faa47c6c887daac675dd7374037f30",
"index": 3056,
"step-1": "<mask token>\n\n\nclass WellRepository:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WellRepository:\n <mask token>\n\n def __init__(self, db):\n self._db = db\n\n def list(self) ->List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) ->List[\n Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({\n 'facility.lifecycle.name': {'$in': [status.value for status in\n statuses]}})]\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass WellRepository:\n schema = WellPersistenceSchema()\n\n def __init__(self, db):\n self._db = db\n\n def list(self) ->List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) ->List[\n Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({\n 'facility.lifecycle.name': {'$in': [status.value for status in\n statuses]}})]\n\n def find_well_by_facility_id(self, identifier: str) ->Optional[Well]:\n doc = self._db.wells.find_one({'facility.id': identifier})\n if doc:\n return self.schema.load(doc)\n",
"step-4": "from typing import List, Optional\nfrom backend.domain.well import FacilityState, Well\nfrom backend.repository.persistence.well import WellPersistenceSchema\n\n\nclass WellRepository:\n schema = WellPersistenceSchema()\n\n def __init__(self, db):\n self._db = db\n\n def list(self) ->List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) ->List[\n Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({\n 'facility.lifecycle.name': {'$in': [status.value for status in\n statuses]}})]\n\n def find_well_by_facility_id(self, identifier: str) ->Optional[Well]:\n doc = self._db.wells.find_one({'facility.id': identifier})\n if doc:\n return self.schema.load(doc)\n",
"step-5": "from typing import List, Optional\n\nfrom backend.domain.well import FacilityState, Well\nfrom backend.repository.persistence.well import WellPersistenceSchema\n\n\nclass WellRepository:\n schema = WellPersistenceSchema()\n\n def __init__(self, db):\n self._db = db\n\n def list(self) -> List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) -> List[Well]:\n return [\n self.schema.load(doc)\n for doc in self._db.wells.find({\"facility.lifecycle.name\": {\"$in\": [status.value for status in statuses]}})\n ]\n\n def find_well_by_facility_id(self, identifier: str) -> Optional[Well]:\n doc = self._db.wells.find_one({\"facility.id\": identifier})\n if doc:\n return self.schema.load(doc)\n",
"step-ids": [
1,
5,
7,
8,
9
]
}
|
[
1,
5,
7,
8,
9
] |
#!/usr/bin/python
import gzip
import os
infiles = []
ids=[]
ages=[]
with open('all_C_metadata.txt') as f:
f.readline()
f.readline()
for line in f:
infiles.append(line.split('\t')[0])
ids.append(line.split('\t')[1])
ages.append(line.split('\t')[2])
with open('all_C_samples/diversity.txt', 'w') as of:
#this stuff is specific to what i used if for before - not sure if you will need it
of.write('sample'+'\t' + 'age' + '\t' + 'd50' + '\n')
for i in range(len(infiles)):
infile = infiles[i]
os.system('gunzip -k %s'%infile)
with open(infile[:-3]) as f:
            print(infile)
d50_not_reached=1
d50_clone=0
clone_count=0
read_count=0
total_clones=0
f.readline()
for line in f:
total_clones+=1
read_count+=float(line.strip().split('\t')[1])
clone_count+=1
if read_count>=.5 and d50_not_reached:
d50_clone=clone_count
d50_not_reached=0
os.system('rm %s'%infile[:-3])
of.write(ids[i] + '\t' + ages[i] + '\t' + str(d50_clone/float(total_clones))+'\n')
def d50(clones, num_Reads):
    """
    clones should be a dict of clones.

    num_Reads is a property of a rep_seq object, so you can just
    pass that if you are finding the d50 of the whole repertoire.
    However, I don't think it is a property of each VJ pair, but you can
    easily calculate it with something like len(Reads_split_by_VJ[the_VJ_pair]).

    This function determines what fraction of the top clones
    makes up 50% of reads (i.e. do the top X% of clones account
    for 50% of reads?).
    """
    d50_amount = num_Reads / 2
    read_count = 0
    for i in clones:
        read_count += clones[i].num_reads
        if read_count >= d50_amount:
            return i / float(len(clones))
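
# Usage sketch (assumptions: clones are keyed by 1-based rank, as the
# key / len(clones) ratio implies, and _Clone is an illustrative stand-in,
# not a class from the original pipeline).
class _Clone(object):
    def __init__(self, num_reads):
        self.num_reads = num_reads

_by_rank = {1: _Clone(60), 2: _Clone(30), 3: _Clone(10)}
print(d50(_by_rank, 100))  # rank-1 clone alone covers 50 of 100 reads -> 1/3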
|
normal
|
{
"blob_id": "c02f46e8d89dd4b141c86df461ecbb8ed608b61b",
"index": 7826,
"step-1": " #!/usr/bin/python\n\nimport gzip\nimport os\n\ninfiles = []\nids=[]\nages=[]\nwith open('all_C_metadata.txt') as f:\n f.readline()\n f.readline()\n for line in f:\n infiles.append(line.split('\\t')[0])\n ids.append(line.split('\\t')[1])\n ages.append(line.split('\\t')[2])\n\nwith open('all_C_samples/diversity.txt', 'w') as of:\n\n #this stuff is specific to what i used if for before - not sure if you will need it\n of.write('sample'+'\\t' + 'age' + '\\t' + 'd50' + '\\n')\n for i in range(len(infiles)):\n infile = infiles[i]\n os.system('gunzip -k %s'%infile)\n\n with open(infile[:-3]) as f:\n print infile\n d50_not_reached=1\n d50_clone=0\n clone_count=0\n read_count=0\n total_clones=0\n f.readline()\n for line in f:\n total_clones+=1\n read_count+=float(line.strip().split('\\t')[1])\n clone_count+=1\n if read_count>=.5 and d50_not_reached:\n d50_clone=clone_count\n d50_not_reached=0\n os.system('rm %s'%infile[:-3])\n of.write(ids[i] + '\\t' + ages[i] + '\\t' + str(d50_clone/float(total_clones))+'\\n')\n\n\n\n\ndef d50(clones, num_Reads):\n \"\"\"\n clones should be a dict of clones\n\n num_Reads is a property of a rep_seq object, so you can just \n pass that if you are finding the d50 of the whole repertoire.\n However, I don't think it is a property of each VJ pair, but you can pretty\n easily calculate it with something like len(Reads_split_by_VJ[the_VJ_pair] )\n\n This function will determine what percent of the top clones\n make up 50% of reads (i.e. do the top X% of clones make up\n 50 % of reads? )\n\n\n \"\"\" \n\n\n d50_amount = num_Reads/2\n read_count=0\n for i in clones:\n read_count+=clones[i].num_reads\n if read_count>=d50_amount:\n return i/float(len(clones))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from . import find_resault
from . import sql
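# Package-level re-exports; "find_resault" is presumably the module's own
# (misspelled) filename, so the identifier has to stay as-is for the import to work.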
|
normal
|
{
"blob_id": "6f05d1915cd2e123dd72233b59d4de43fd724035",
"index": 7743,
"step-1": "<mask token>\n",
"step-2": "from . import find_resault\nfrom . import sql\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from keyboards import *
from DB import cur, conn
from bot_token import bot
from limit_text import limit_text
def send_answer(question_id, answer_owner, receiver_tel_id, short):
answer = cur.execute('''SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''', (question_id, answer_owner)).fetchone()
keyboard = telebot.types.InlineKeyboardMarkup()
if answer is not None:
id, question_id, tel_id, answer, accepted_answer, rate_answer, photo, document, document_type, document_size, send_date = cur.execute(
'''SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''',
(question_id, answer_owner)).fetchone()
question_owner = \
cur.execute('''SELECT tel_id FROM Questions WHERE id = (%s)''', (question_id, )).fetchone()[0]
# Limiting Long Questions and specifying keyboard accordingly
# GETTING ADMINS AND TAs
role = cur.execute('''SELECT role FROM Users WHERE tel_id = (%s)''', (answer_owner, )).fetchone()[0]
# This flag is used at the bottom for Admin and TAs keyboard setting
short_message_flag = False
# Setting keyboard
if limit_text(answer):
short_message_flag = True
# SHOWMORE key
if short:
answer = limit_text(answer)
showkey = showmore
else:
showkey = showless
if receiver_tel_id == question_owner:
if accepted_answer:
keyboard.add(showkey)
else:
keyboard.add(showkey, accept_answer, next_page_answer)
else:
# FOLLOWERs and Answer Owner only get a show more key
keyboard.add(showkey)
else:
if receiver_tel_id == question_owner:
if not accepted_answer:
if question_owner == receiver_tel_id:
keyboard.add(accept_answer, next_page_answer)
# ATTACHMENTs
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'
.format(document_type, document_size)), callback_data='document')
keyboard.add(document_button)
# SETTING EMOJI BASED ON ACCEPTED OR NOT ACCEPTED ANSWER
if role in ['STUDENT', 'TA']:
if accepted_answer:
answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(question_id) + ' #' + \
str(answer_owner) + '\n\n' + answer + emoji.emojize('\n\n:high_voltage: Rated: {0}/5'.format(rate_answer))
else:
answer = emoji.emojize(':bright_button: #A_') + str(question_id) + ' #' + str(answer_owner) + '\n\n' + answer
if role == 'TA':
answer += emoji.emojize('\n\n:bust_in_silhouette: Sent by ') + role
## ADMINs AND TAs answers are indicated with a flag
elif role in ['ADMIN']:
question_state = cur.execute('''SELECT status FROM Questions WHERE id = (%s)''', (question_id,)).fetchone()[0]
# ADMIN Answers are different
keyboard = telebot.types.InlineKeyboardMarkup()
if short_message_flag:
# SHOWMORE key
if short:
showkey = showmore
else:
showkey = showless
keyboard.add(showkey)
else:
keyboard = None
# ATTACHMENTs
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'.format(document_type,
document_size)), callback_data='document')
keyboard.add(document_button)
answer = emoji.emojize(':collision: #A_') + str(question_id) + ' #' + str(answer_owner) + '\n\n' \
+ answer + emoji.emojize('\n\n:bust_in_silhouette: Sent by ') + role
# Returning Answer and Two Keyboards
return (answer, keyboard)
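
# Usage sketch (the ids below are illustrative assumptions, not project values):
if __name__ == '__main__':
    answer_text, markup = send_answer(question_id=42, answer_owner=111,
                                      receiver_tel_id=222, short=True)
    if answer_text is not None:
        bot.send_message(222, answer_text, reply_markup=markup)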
|
normal
|
{
"blob_id": "464fc2c193769eee86a639f73b933d5413be2b87",
"index": 3396,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_answer(question_id, answer_owner, receiver_tel_id, short):\n answer = cur.execute(\n 'SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)'\n , (question_id, answer_owner)).fetchone()\n keyboard = telebot.types.InlineKeyboardMarkup()\n if answer is not None:\n (id, question_id, tel_id, answer, accepted_answer, rate_answer,\n photo, document, document_type, document_size, send_date) = (cur\n .execute(\n 'SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)',\n (question_id, answer_owner)).fetchone())\n question_owner = cur.execute(\n 'SELECT tel_id FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n role = cur.execute('SELECT role FROM Users WHERE tel_id = (%s)', (\n answer_owner,)).fetchone()[0]\n short_message_flag = False\n if limit_text(answer):\n short_message_flag = True\n if short:\n answer = limit_text(answer)\n showkey = showmore\n else:\n showkey = showless\n if receiver_tel_id == question_owner:\n if accepted_answer:\n keyboard.add(showkey)\n else:\n keyboard.add(showkey, accept_answer, next_page_answer)\n else:\n keyboard.add(showkey)\n elif receiver_tel_id == question_owner:\n if not accepted_answer:\n if question_owner == receiver_tel_id:\n keyboard.add(accept_answer, next_page_answer)\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n if role in ['STUDENT', 'TA']:\n if accepted_answer:\n answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(\n question_id) + ' #' + str(answer_owner\n ) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:high_voltage: Rated: {0}/5\"\"\".format(rate_answer))\n else:\n answer = emoji.emojize(':bright_button: #A_') + str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer\n if role == 'TA':\n answer += emoji.emojize('\\n\\n:bust_in_silhouette: Sent by '\n ) + role\n elif role in ['ADMIN']:\n question_state = cur.execute(\n 'SELECT status FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n keyboard = telebot.types.InlineKeyboardMarkup()\n if short_message_flag:\n if short:\n showkey = showmore\n else:\n showkey = showless\n keyboard.add(showkey)\n else:\n keyboard = None\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n answer = emoji.emojize(':collision: #A_') + str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:bust_in_silhouette: Sent by \"\"\") + role\n return answer, keyboard\n",
"step-3": "from keyboards import *\nfrom DB import cur, conn\nfrom bot_token import bot\nfrom limit_text import limit_text\n\n\ndef send_answer(question_id, answer_owner, receiver_tel_id, short):\n answer = cur.execute(\n 'SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)'\n , (question_id, answer_owner)).fetchone()\n keyboard = telebot.types.InlineKeyboardMarkup()\n if answer is not None:\n (id, question_id, tel_id, answer, accepted_answer, rate_answer,\n photo, document, document_type, document_size, send_date) = (cur\n .execute(\n 'SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)',\n (question_id, answer_owner)).fetchone())\n question_owner = cur.execute(\n 'SELECT tel_id FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n role = cur.execute('SELECT role FROM Users WHERE tel_id = (%s)', (\n answer_owner,)).fetchone()[0]\n short_message_flag = False\n if limit_text(answer):\n short_message_flag = True\n if short:\n answer = limit_text(answer)\n showkey = showmore\n else:\n showkey = showless\n if receiver_tel_id == question_owner:\n if accepted_answer:\n keyboard.add(showkey)\n else:\n keyboard.add(showkey, accept_answer, next_page_answer)\n else:\n keyboard.add(showkey)\n elif receiver_tel_id == question_owner:\n if not accepted_answer:\n if question_owner == receiver_tel_id:\n keyboard.add(accept_answer, next_page_answer)\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n if role in ['STUDENT', 'TA']:\n if accepted_answer:\n answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(\n question_id) + ' #' + str(answer_owner\n ) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:high_voltage: Rated: {0}/5\"\"\".format(rate_answer))\n else:\n answer = emoji.emojize(':bright_button: #A_') + str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer\n if role == 'TA':\n answer += emoji.emojize('\\n\\n:bust_in_silhouette: Sent by '\n ) + role\n elif role in ['ADMIN']:\n question_state = cur.execute(\n 'SELECT status FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n keyboard = telebot.types.InlineKeyboardMarkup()\n if short_message_flag:\n if short:\n showkey = showmore\n else:\n showkey = showless\n keyboard.add(showkey)\n else:\n keyboard = None\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n answer = emoji.emojize(':collision: #A_') + str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:bust_in_silhouette: Sent by \"\"\") + role\n return answer, keyboard\n",
"step-4": "from keyboards import *\nfrom DB import cur, conn\nfrom bot_token import bot\nfrom limit_text import limit_text\n\ndef send_answer(question_id, answer_owner, receiver_tel_id, short):\n\n answer = cur.execute('''SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''', (question_id, answer_owner)).fetchone()\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n if answer is not None:\n id, question_id, tel_id, answer, accepted_answer, rate_answer, photo, document, document_type, document_size, send_date = cur.execute(\n '''SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''',\n (question_id, answer_owner)).fetchone()\n\n question_owner = \\\n cur.execute('''SELECT tel_id FROM Questions WHERE id = (%s)''', (question_id, )).fetchone()[0]\n # Limiting Long Questions and specifying keyboard accordingly\n\n # GETTING ADMINS AND TAs\n role = cur.execute('''SELECT role FROM Users WHERE tel_id = (%s)''', (answer_owner, )).fetchone()[0]\n\n # This flag is used at the bottom for Admin and TAs keyboard setting\n short_message_flag = False\n # Setting keyboard\n if limit_text(answer):\n short_message_flag = True\n # SHOWMORE key\n if short:\n answer = limit_text(answer)\n showkey = showmore\n else:\n showkey = showless\n\n if receiver_tel_id == question_owner:\n if accepted_answer:\n keyboard.add(showkey)\n else:\n keyboard.add(showkey, accept_answer, next_page_answer)\n else:\n # FOLLOWERs and Answer Owner only get a show more key\n keyboard.add(showkey)\n else:\n if receiver_tel_id == question_owner:\n if not accepted_answer:\n if question_owner == receiver_tel_id:\n keyboard.add(accept_answer, next_page_answer)\n\n # ATTACHMENTs\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'\n .format(document_type, document_size)), callback_data='document')\n keyboard.add(document_button)\n\n # SETTING EMOJI BASED ON ACCEPTED OR NOT ACCEPTED ANSWER\n if role in ['STUDENT', 'TA']:\n if accepted_answer:\n answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(question_id) + ' #' + \\\n str(answer_owner) + '\\n\\n' + answer + emoji.emojize('\\n\\n:high_voltage: Rated: {0}/5'.format(rate_answer))\n\n else:\n answer = emoji.emojize(':bright_button: #A_') + str(question_id) + ' #' + str(answer_owner) + '\\n\\n' + answer\n\n if role == 'TA':\n answer += emoji.emojize('\\n\\n:bust_in_silhouette: Sent by ') + role\n\n ## ADMINs AND TAs answers are indicated with a flag\n elif role in ['ADMIN']:\n question_state = cur.execute('''SELECT status FROM Questions WHERE id = (%s)''', (question_id,)).fetchone()[0]\n\n # ADMIN Answers are different\n keyboard = telebot.types.InlineKeyboardMarkup()\n if short_message_flag:\n # SHOWMORE key\n if short:\n showkey = showmore\n else:\n showkey = showless\n\n keyboard.add(showkey)\n else:\n keyboard = None\n\n # ATTACHMENTs\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n\n answer = emoji.emojize(':collision: #A_') + str(question_id) + ' #' + str(answer_owner) + '\\n\\n' \\\n + answer + emoji.emojize('\\n\\n:bust_in_silhouette: Sent by ') + role\n\n # Returning Answer and Two Keyboards\n return (answer, keyboard)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
setup(
name="isc-dhcpd-parser",
version="0.1",
description="Parser for isc-dhcp config files (dhcpd.conf)",
author="Pavel Podkorytov",
author_email="[email protected]",
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
packages=find_packages(),
scripts=["bin/isc_dhcpd_leases.py"],
install_requires=["ply"],
)
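# Typical local install flow (standard setuptools usage, nothing here is
# project-specific):
#   pip install .          # regular install
#   pip install -e .       # editable install for development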
|
normal
|
{
"blob_id": "79141679bb2839de9d4a25b6c6c285905dddbb0d",
"index": 6460,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='isc-dhcpd-parser', version='0.1', description=\n 'Parser for isc-dhcp config files (dhcpd.conf)', author=\n 'Pavel Podkorytov', author_email='[email protected]', classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3'], packages=find_packages(),\n scripts=['bin/isc_dhcpd_leases.py'], install_requires=['ply'])\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='isc-dhcpd-parser', version='0.1', description=\n 'Parser for isc-dhcp config files (dhcpd.conf)', author=\n 'Pavel Podkorytov', author_email='[email protected]', classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3'], packages=find_packages(),\n scripts=['bin/isc_dhcpd_leases.py'], install_requires=['ply'])\n",
"step-4": "#!/usr/bin/python\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n\nsetup(\n name=\"isc-dhcpd-parser\",\n version=\"0.1\",\n description=\"Parser for isc-dhcp config files (dhcpd.conf)\",\n author=\"Pavel Podkorytov\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n ],\n packages=find_packages(),\n scripts=[\"bin/isc_dhcpd_leases.py\"],\n install_requires=[\"ply\"],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ExecuteCommandTest(TestBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ExecuteCommandTest(TestBase):
def setUp(self):
super(ExecuteCommandTest, self).setUp()
self.cwd = os.path.join(os.path.dirname(__file__), '../../..')
self.logger = Mock()
MonkeyPatcher.patch(action, 'create_background_logger', Mock(
return_value=self.logger))
<|reserved_special_token_0|>
def create_stdout(self, data):
l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.
format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]
return ', '.join(l)
def test_success(self):
data = self.create_data('./tests/bin/hook-test', Event.
ARTIFACT_UPLOADED)
result = action.execute_command(**data)
self.assertTrue(result)
expected_result = {'stdout': self.create_stdout(data), 'stderr':
'STDERR', 'exit_code': 0}
self.logger.debug.assert_called_with('Command Result: {0}'.format(
json.dumps(expected_result, indent=4)))
def test_failure(self):
data = self.create_data('./tests/bin/hook-test', 'fail')
result = action.execute_command(**data)
self.assertFalse(result)
expected_result = {'stdout': self.create_stdout(data), 'stderr':
'STDERR', 'exit_code': 1}
self.logger.debug.assert_called_with('Command Result: {0}'.format(
json.dumps(expected_result, indent=4)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ExecuteCommandTest(TestBase):
def setUp(self):
super(ExecuteCommandTest, self).setUp()
self.cwd = os.path.join(os.path.dirname(__file__), '../../..')
self.logger = Mock()
MonkeyPatcher.patch(action, 'create_background_logger', Mock(
return_value=self.logger))
def create_data(self, command, event):
data = {'command': command, 'log_level': logging.DEBUG, 'event':
event, 'uri': 'https://api.shelf.com/fake/artifact/1',
'meta_uri': 'https://api.shelf.com/fake/artifact/1/_meta',
'cwd': self.cwd}
return data
def create_stdout(self, data):
l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.
format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]
return ', '.join(l)
def test_success(self):
data = self.create_data('./tests/bin/hook-test', Event.
ARTIFACT_UPLOADED)
result = action.execute_command(**data)
self.assertTrue(result)
expected_result = {'stdout': self.create_stdout(data), 'stderr':
'STDERR', 'exit_code': 0}
self.logger.debug.assert_called_with('Command Result: {0}'.format(
json.dumps(expected_result, indent=4)))
def test_failure(self):
data = self.create_data('./tests/bin/hook-test', 'fail')
result = action.execute_command(**data)
self.assertFalse(result)
expected_result = {'stdout': self.create_stdout(data), 'stderr':
'STDERR', 'exit_code': 1}
self.logger.debug.assert_called_with('Command Result: {0}'.format(
json.dumps(expected_result, indent=4)))
<|reserved_special_token_1|>
from mock import Mock
from shelf.hook.background import action
from shelf.hook.event import Event
from tests.test_base import TestBase
import json
import os
import logging
from pyproctor import MonkeyPatcher
class ExecuteCommandTest(TestBase):
def setUp(self):
super(ExecuteCommandTest, self).setUp()
self.cwd = os.path.join(os.path.dirname(__file__), '../../..')
self.logger = Mock()
MonkeyPatcher.patch(action, 'create_background_logger', Mock(
return_value=self.logger))
def create_data(self, command, event):
data = {'command': command, 'log_level': logging.DEBUG, 'event':
event, 'uri': 'https://api.shelf.com/fake/artifact/1',
'meta_uri': 'https://api.shelf.com/fake/artifact/1/_meta',
'cwd': self.cwd}
return data
def create_stdout(self, data):
l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.
format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]
return ', '.join(l)
def test_success(self):
data = self.create_data('./tests/bin/hook-test', Event.
ARTIFACT_UPLOADED)
result = action.execute_command(**data)
self.assertTrue(result)
expected_result = {'stdout': self.create_stdout(data), 'stderr':
'STDERR', 'exit_code': 0}
self.logger.debug.assert_called_with('Command Result: {0}'.format(
json.dumps(expected_result, indent=4)))
def test_failure(self):
data = self.create_data('./tests/bin/hook-test', 'fail')
result = action.execute_command(**data)
self.assertFalse(result)
expected_result = {'stdout': self.create_stdout(data), 'stderr':
'STDERR', 'exit_code': 1}
self.logger.debug.assert_called_with('Command Result: {0}'.format(
json.dumps(expected_result, indent=4)))
<|reserved_special_token_1|>
from mock import Mock
from shelf.hook.background import action
from shelf.hook.event import Event
from tests.test_base import TestBase
import json
import os
import logging
from pyproctor import MonkeyPatcher
class ExecuteCommandTest(TestBase):
def setUp(self):
super(ExecuteCommandTest, self).setUp()
self.cwd = os.path.join(os.path.dirname(__file__), "../../..")
self.logger = Mock()
MonkeyPatcher.patch(action, "create_background_logger", Mock(return_value=self.logger))
def create_data(self, command, event):
data = {
"command": command,
"log_level": logging.DEBUG,
"event": event,
"uri": "https://api.shelf.com/fake/artifact/1",
"meta_uri": "https://api.shelf.com/fake/artifact/1/_meta",
"cwd": self.cwd
}
return data
def create_stdout(self, data):
l = [
"SHELF_EVENT={0}".format(data["event"]),
"SHELF_URI={0}".format(data["uri"]),
"SHELF_META_URI={0}".format(data["meta_uri"])
]
return ", ".join(l)
def test_success(self):
data = self.create_data("./tests/bin/hook-test", Event.ARTIFACT_UPLOADED)
result = action.execute_command(**data)
self.assertTrue(result)
expected_result = {
"stdout": self.create_stdout(data),
"stderr": "STDERR",
"exit_code": 0
}
self.logger.debug.assert_called_with("Command Result: {0}".format(json.dumps(expected_result, indent=4)))
def test_failure(self):
data = self.create_data("./tests/bin/hook-test", "fail")
result = action.execute_command(**data)
self.assertFalse(result)
expected_result = {
"stdout": self.create_stdout(data),
"stderr": "STDERR",
"exit_code": 1
}
self.logger.debug.assert_called_with("Command Result: {0}".format(json.dumps(expected_result, indent=4)))
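
# Direct-run sketch (assumes TestBase ultimately extends unittest.TestCase,
# as pyproctor-based suites normally do; not part of the original file):
if __name__ == "__main__":
    import unittest
    unittest.main()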
|
flexible
|
{
"blob_id": "c312bf096c7f4aaf9269a8885ff254fd4852cfe0",
"index": 9996,
"step-1": "<mask token>\n\n\nclass ExecuteCommandTest(TestBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), '../../..')\n self.logger = Mock()\n MonkeyPatcher.patch(action, 'create_background_logger', Mock(\n return_value=self.logger))\n <mask token>\n\n def create_stdout(self, data):\n l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.\n format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]\n return ', '.join(l)\n\n def test_success(self):\n data = self.create_data('./tests/bin/hook-test', Event.\n ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 0}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data('./tests/bin/hook-test', 'fail')\n result = action.execute_command(**data)\n self.assertFalse(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 1}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n",
"step-3": "<mask token>\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), '../../..')\n self.logger = Mock()\n MonkeyPatcher.patch(action, 'create_background_logger', Mock(\n return_value=self.logger))\n\n def create_data(self, command, event):\n data = {'command': command, 'log_level': logging.DEBUG, 'event':\n event, 'uri': 'https://api.shelf.com/fake/artifact/1',\n 'meta_uri': 'https://api.shelf.com/fake/artifact/1/_meta',\n 'cwd': self.cwd}\n return data\n\n def create_stdout(self, data):\n l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.\n format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]\n return ', '.join(l)\n\n def test_success(self):\n data = self.create_data('./tests/bin/hook-test', Event.\n ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 0}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data('./tests/bin/hook-test', 'fail')\n result = action.execute_command(**data)\n self.assertFalse(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 1}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n",
"step-4": "from mock import Mock\nfrom shelf.hook.background import action\nfrom shelf.hook.event import Event\nfrom tests.test_base import TestBase\nimport json\nimport os\nimport logging\nfrom pyproctor import MonkeyPatcher\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), '../../..')\n self.logger = Mock()\n MonkeyPatcher.patch(action, 'create_background_logger', Mock(\n return_value=self.logger))\n\n def create_data(self, command, event):\n data = {'command': command, 'log_level': logging.DEBUG, 'event':\n event, 'uri': 'https://api.shelf.com/fake/artifact/1',\n 'meta_uri': 'https://api.shelf.com/fake/artifact/1/_meta',\n 'cwd': self.cwd}\n return data\n\n def create_stdout(self, data):\n l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.\n format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]\n return ', '.join(l)\n\n def test_success(self):\n data = self.create_data('./tests/bin/hook-test', Event.\n ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 0}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data('./tests/bin/hook-test', 'fail')\n result = action.execute_command(**data)\n self.assertFalse(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 1}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n",
"step-5": "from mock import Mock\nfrom shelf.hook.background import action\nfrom shelf.hook.event import Event\nfrom tests.test_base import TestBase\nimport json\nimport os\nimport logging\nfrom pyproctor import MonkeyPatcher\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), \"../../..\")\n self.logger = Mock()\n MonkeyPatcher.patch(action, \"create_background_logger\", Mock(return_value=self.logger))\n\n def create_data(self, command, event):\n data = {\n \"command\": command,\n \"log_level\": logging.DEBUG,\n \"event\": event,\n \"uri\": \"https://api.shelf.com/fake/artifact/1\",\n \"meta_uri\": \"https://api.shelf.com/fake/artifact/1/_meta\",\n \"cwd\": self.cwd\n }\n\n return data\n\n def create_stdout(self, data):\n l = [\n \"SHELF_EVENT={0}\".format(data[\"event\"]),\n \"SHELF_URI={0}\".format(data[\"uri\"]),\n \"SHELF_META_URI={0}\".format(data[\"meta_uri\"])\n ]\n\n return \", \".join(l)\n\n def test_success(self):\n data = self.create_data(\"./tests/bin/hook-test\", Event.ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n\n expected_result = {\n \"stdout\": self.create_stdout(data),\n \"stderr\": \"STDERR\",\n \"exit_code\": 0\n }\n\n self.logger.debug.assert_called_with(\"Command Result: {0}\".format(json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data(\"./tests/bin/hook-test\", \"fail\")\n result = action.execute_command(**data)\n self.assertFalse(result)\n\n expected_result = {\n \"stdout\": self.create_stdout(data),\n \"stderr\": \"STDERR\",\n \"exit_code\": 1\n }\n\n self.logger.debug.assert_called_with(\"Command Result: {0}\".format(json.dumps(expected_result, indent=4)))\n",
"step-ids": [
1,
5,
6,
7,
8
]
}
|
[
1,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def SO3_to_R3(x_skew):
x = np.zeros((3, 1))
x[0, 0] = -1 * x_skew[1, 2]
x[1, 0] = x_skew[0, 2]
x[2, 0] = -1 * x_skew[0, 1]
return x
<|reserved_special_token_1|>
import numpy as np
def SO3_to_R3(x_skew):
x = np.zeros((3, 1))
x[0, 0] = -1 * x_skew[1, 2]
x[1, 0] = x_skew[0, 2]
x[2, 0] = -1 * x_skew[0, 1]
return x
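

# Hedged usage sketch (not part of the original sample): SO3_to_R3 is the
# "vee" map, recovering a vector x from its skew-symmetric ("hat") matrix,
# assuming the convention skew(x) = [[0, -x3, x2], [x3, 0, -x1], [-x2, x1, 0]].
if __name__ == '__main__':
    x_skew = np.array([[0.0, -3.0, 2.0],
                       [3.0, 0.0, -1.0],
                       [-2.0, 1.0, 0.0]])
    print(SO3_to_R3(x_skew))  # expected: [[1.], [2.], [3.]]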
|
flexible
|
{
"blob_id": "97bff6eb0cd16c915180cb634e6bf30e17adfdef",
"index": 2080,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef SO3_to_R3(x_skew):\n x = np.zeros((3, 1))\n x[0, 0] = -1 * x_skew[1, 2]\n x[1, 0] = x_skew[0, 2]\n x[2, 0] = -1 * x_skew[0, 1]\n return x\n",
"step-3": "import numpy as np\n\n\ndef SO3_to_R3(x_skew):\n x = np.zeros((3, 1))\n x[0, 0] = -1 * x_skew[1, 2]\n x[1, 0] = x_skew[0, 2]\n x[2, 0] = -1 * x_skew[0, 1]\n return x\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
stevila = [5, 2, 8, 3]
# Print all numbers
print(stevila)
# Print the number at index 1
print(stevila[1])
|
normal
|
{
"blob_id": "6e845f2543b548fb936cc3719eb150e530281945",
"index": 9505,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(stevila)\nprint(stevila[1])\n",
"step-3": "stevila = [5, 2, 8, 3]\nprint(stevila)\nprint(stevila[1])\n",
"step-4": "stevila = [5, 2, 8, 3]\n\n#Izpis vseh števil\nprint(stevila)\n\n#Izpis števila na mestu 1\nprint(stevila[1])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def fetch(entries):
input_list = []
for entry in entries:
field = entry[0]
text = entry[1].get()
input_list.append(text)
telnetConnection(input_list[0], input_list[1], input_list[2],
input_list[3], input_list[4])
def makeform(root, fields):
entries = []
for field in fields:
row = tk.Frame(root)
lab = tk.Label(row, width=15, text=field, anchor='w')
ent = tk.Entry(row)
row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
entries.append((field, ent))
return entries
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fetch(entries):
input_list = []
for entry in entries:
field = entry[0]
text = entry[1].get()
input_list.append(text)
telnetConnection(input_list[0], input_list[1], input_list[2],
input_list[3], input_list[4])
def makeform(root, fields):
entries = []
for field in fields:
row = tk.Frame(root)
lab = tk.Label(row, width=15, text=field, anchor='w')
ent = tk.Entry(row)
row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
entries.append((field, ent))
return entries
if __name__ == '__main__':
root = tk.Tk()
ents = makeform(root, fields)
root.bind('<Return>', lambda event, e=ents: fetch(e))
btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))
btnSend.pack(side=tk.LEFT, padx=5, pady=5)
btnQuit = tk.Button(root, text='Quit', command=root.quit)
btnQuit.pack(side=tk.LEFT, padx=5, pady=5)
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'
def fetch(entries):
input_list = []
for entry in entries:
field = entry[0]
text = entry[1].get()
input_list.append(text)
telnetConnection(input_list[0], input_list[1], input_list[2],
input_list[3], input_list[4])
def makeform(root, fields):
entries = []
for field in fields:
row = tk.Frame(root)
lab = tk.Label(row, width=15, text=field, anchor='w')
ent = tk.Entry(row)
row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
entries.append((field, ent))
return entries
if __name__ == '__main__':
root = tk.Tk()
ents = makeform(root, fields)
root.bind('<Return>', lambda event, e=ents: fetch(e))
btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))
btnSend.pack(side=tk.LEFT, padx=5, pady=5)
btnQuit = tk.Button(root, text='Quit', command=root.quit)
btnQuit.pack(side=tk.LEFT, padx=5, pady=5)
root.mainloop()
<|reserved_special_token_1|>
import tkinter as tk
from telnetConn import telnetConnection
fields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'
def fetch(entries):
input_list = []
for entry in entries:
field = entry[0]
text = entry[1].get()
input_list.append(text)
telnetConnection(input_list[0], input_list[1], input_list[2],
input_list[3], input_list[4])
def makeform(root, fields):
entries = []
for field in fields:
row = tk.Frame(root)
lab = tk.Label(row, width=15, text=field, anchor='w')
ent = tk.Entry(row)
row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
entries.append((field, ent))
return entries
if __name__ == '__main__':
root = tk.Tk()
ents = makeform(root, fields)
root.bind('<Return>', lambda event, e=ents: fetch(e))
btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))
btnSend.pack(side=tk.LEFT, padx=5, pady=5)
btnQuit = tk.Button(root, text='Quit', command=root.quit)
btnQuit.pack(side=tk.LEFT, padx=5, pady=5)
root.mainloop()
<|reserved_special_token_1|>
import tkinter as tk
from telnetConn import telnetConnection
fields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'
def fetch(entries):
input_list = []
for entry in entries:
field = entry[0]
text = entry[1].get()
input_list.append(text)
# print('%s: "%s"' % (field, text))
telnetConnection(input_list[0],input_list[1],input_list[2],input_list[3],input_list[4])
def makeform(root, fields):
entries = []
for field in fields:
row = tk.Frame(root)
lab = tk.Label(row, width=15, text=field, anchor='w')
ent = tk.Entry(row)
row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
entries.append((field, ent))
return entries
if __name__ == '__main__':
root = tk.Tk()
ents = makeform(root, fields)
root.bind('<Return>', (lambda event, e=ents: fetch(e)))
btnSend = tk.Button(root, text='Send',
command=(lambda e=ents: fetch(e)))
btnSend.pack(side=tk.LEFT, padx=5, pady=5)
btnQuit = tk.Button(root, text='Quit', command=root.quit)
btnQuit.pack(side=tk.LEFT, padx=5, pady=5)
root.mainloop()
|
flexible
|
{
"blob_id": "3328c2ae0816c146398ecde92a056d1e77683696",
"index": 7357,
"step-1": "<mask token>\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n ents = makeform(root, fields)\n root.bind('<Return>', lambda event, e=ents: fetch(e))\n btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\n root.mainloop()\n",
"step-3": "<mask token>\nfields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n ents = makeform(root, fields)\n root.bind('<Return>', lambda event, e=ents: fetch(e))\n btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\n root.mainloop()\n",
"step-4": "import tkinter as tk\nfrom telnetConn import telnetConnection\nfields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n ents = makeform(root, fields)\n root.bind('<Return>', lambda event, e=ents: fetch(e))\n btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\n root.mainloop()\n",
"step-5": "import tkinter as tk\r\nfrom telnetConn import telnetConnection\r\n\r\n\r\nfields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'\r\n\r\ndef fetch(entries):\r\n input_list = []\r\n for entry in entries:\r\n field = entry[0]\r\n text = entry[1].get()\r\n input_list.append(text)\r\n # print('%s: \"%s\"' % (field, text)) \r\n telnetConnection(input_list[0],input_list[1],input_list[2],input_list[3],input_list[4])\r\n \r\n\r\n\r\ndef makeform(root, fields):\r\n entries = []\r\n for field in fields:\r\n row = tk.Frame(root)\r\n lab = tk.Label(row, width=15, text=field, anchor='w')\r\n ent = tk.Entry(row)\r\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\r\n lab.pack(side=tk.LEFT)\r\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\r\n entries.append((field, ent))\r\n return entries\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n root = tk.Tk()\r\n ents = makeform(root, fields)\r\n root.bind('<Return>', (lambda event, e=ents: fetch(e))) \r\n\r\n btnSend = tk.Button(root, text='Send',\r\n command=(lambda e=ents: fetch(e))) \r\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\r\n\r\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\r\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\r\n\r\n root.mainloop()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def updateMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
cur = 0
col = len(matrix[0])
row = len(matrix)
while True:
cur += 1
flag = False
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == cur:
if i - 1 < 0 or matrix[i - 1][j] >= cur:
pass
else:
continue
if j - 1 < 0 or matrix[i][j - 1] >= cur:
pass
else:
continue
if i + 1 >= row or matrix[i + 1][j] >= cur:
pass
else:
continue
if j + 1 >= col or matrix[i][j + 1] >= cur:
pass
else:
continue
flag = True
matrix[i][j] += 1
if not flag:
break
return matrix
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def updateMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
cur = 0
col = len(matrix[0])
row = len(matrix)
while True:
cur += 1
flag = False
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == cur:
if i - 1 < 0 or matrix[i - 1][j] >= cur:
pass
else:
continue
if j - 1 < 0 or matrix[i][j - 1] >= cur:
pass
else:
continue
if i + 1 >= row or matrix[i + 1][j] >= cur:
pass
else:
continue
if j + 1 >= col or matrix[i][j + 1] >= cur:
pass
else:
continue
flag = True
matrix[i][j] += 1
if not flag:
break
return matrix
if __name__ == '__main__':
solution = Solution()
data = [[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
print(solution.updateMatrix(data))
data = [[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 1, 0, 0, 1, 0, 0], [1, 0, 1, 0, 1, 1, 1, 1, 1, 1], [0,
1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0], [0, 1,
0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 1, 1, 1, 0, 1], [1, 1, 1,
1, 1, 1, 1, 0, 1, 0], [1, 1, 1, 1, 0, 1, 0, 0, 1, 1]]
result = [[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0, 1, 1
], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0], [1, 0, 1, 0, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0], [0,
1, 0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 2, 1, 1, 0, 1], [2, 1,
1, 1, 1, 1, 1, 0, 1, 0], [1, 2, 1, 1, 0, 1, 0, 0, 1, 1]]
true_result = [[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0,
1, 1], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0], [1, 0, 1, 0, 1, 1, 1, 1, 1,
1], [0, 1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 2, 1, 1, 0, 1], [2,
1, 1, 1, 1, 2, 1, 0, 1, 0], [3, 2, 2, 1, 0, 1, 0, 0, 1, 1]]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
# @Time : 2018/6/11 6:45 PM
# @Author : zhanzecheng
# @File : 542.01矩阵1.py
# @Software: PyCharm
"""
# A simple looping approach to solving this problem.
# The idea is neat: count distances by making repeated passes over the matrix.
# TODO: check 1
class Solution:
def updateMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
cur = 0
col = len(matrix[0])
row = len(matrix)
while True:
cur += 1
flag = False
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == cur:
if i - 1 < 0 or matrix[i - 1][j] >= cur:
pass
else:
continue
if j - 1 < 0 or matrix[i][j - 1] >= cur:
pass
else:
continue
if i + 1 >= row or matrix[i + 1][j] >= cur:
pass
else:
continue
if j + 1 >= col or matrix[i][j + 1] >= cur:
pass
else:
continue
flag = True
matrix[i][j] += 1
if not flag:
break
return matrix
if __name__ == '__main__':
solution = Solution()
data = [
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]
]
print(solution.updateMatrix(data))
data =[
[1, 0, 1, 1, 0, 0, 1, 0, 0, 1],
[0, 1, 1, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
[1, 0, 1, 0, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 1, 1, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 0, 0, 1, 1]
]
result = [
[1,0,1,1,0,0,1,0,0,1],
[0,1,1,0,1,0,1,0,1,1],
[0,0,1,0,1,0,0,1,0,0],
[1,0,1,0,1,1,1,1,1,1],
[0,1,0,1,1,0,0,0,0,1],
[0,0,1,0,1,1,1,0,1,0],
[0,1,0,1,0,1,0,0,1,1],
[1,0,0,0,1,2,1,1,0,1],
[2,1,1,1,1,1,1,0,1,0],
[1,2,1,1,0,1,0,0,1,1]
]
true_result = [
[1,0,1,1,0,0,1,0,0,1],
[0,1,1,0,1,0,1,0,1,1],
[0,0,1,0,1,0,0,1,0,0],
[1,0,1,0,1,1,1,1,1,1],
[0,1,0,1,1,0,0,0,0,1],
[0,0,1,0,1,1,1,0,1,0],
[0,1,0,1,0,1,0,0,1,1],
[1,0,0,0,1,2,1,1,0,1],
[2,1,1,1,1,2,1,0,1,0],
[3,2,2,1,0,1,0,0,1,1]
]
|
flexible
|
{
"blob_id": "1145050d82e614d5c248fc7e6a71720e6ff72414",
"index": 6055,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def updateMatrix(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n cur = 0\n col = len(matrix[0])\n row = len(matrix)\n while True:\n cur += 1\n flag = False\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == cur:\n if i - 1 < 0 or matrix[i - 1][j] >= cur:\n pass\n else:\n continue\n if j - 1 < 0 or matrix[i][j - 1] >= cur:\n pass\n else:\n continue\n if i + 1 >= row or matrix[i + 1][j] >= cur:\n pass\n else:\n continue\n if j + 1 >= col or matrix[i][j + 1] >= cur:\n pass\n else:\n continue\n flag = True\n matrix[i][j] += 1\n if not flag:\n break\n return matrix\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def updateMatrix(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n cur = 0\n col = len(matrix[0])\n row = len(matrix)\n while True:\n cur += 1\n flag = False\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == cur:\n if i - 1 < 0 or matrix[i - 1][j] >= cur:\n pass\n else:\n continue\n if j - 1 < 0 or matrix[i][j - 1] >= cur:\n pass\n else:\n continue\n if i + 1 >= row or matrix[i + 1][j] >= cur:\n pass\n else:\n continue\n if j + 1 >= col or matrix[i][j + 1] >= cur:\n pass\n else:\n continue\n flag = True\n matrix[i][j] += 1\n if not flag:\n break\n return matrix\n\n\nif __name__ == '__main__':\n solution = Solution()\n data = [[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]\n print(solution.updateMatrix(data))\n data = [[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0, 1, 1],\n [0, 0, 1, 0, 1, 0, 0, 1, 0, 0], [1, 0, 1, 0, 1, 1, 1, 1, 1, 1], [0,\n 1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0], [0, 1, \n 0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 1, 1, 1, 0, 1], [1, 1, 1, \n 1, 1, 1, 1, 0, 1, 0], [1, 1, 1, 1, 0, 1, 0, 0, 1, 1]]\n result = [[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0, 1, 1\n ], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0], [1, 0, 1, 0, 1, 1, 1, 1, 1, 1],\n [0, 1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0], [0,\n 1, 0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 2, 1, 1, 0, 1], [2, 1, \n 1, 1, 1, 1, 1, 0, 1, 0], [1, 2, 1, 1, 0, 1, 0, 0, 1, 1]]\n true_result = [[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0,\n 1, 1], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0], [1, 0, 1, 0, 1, 1, 1, 1, 1, \n 1], [0, 1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0],\n [0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 2, 1, 1, 0, 1], [2,\n 1, 1, 1, 1, 2, 1, 0, 1, 0], [3, 2, 2, 1, 0, 1, 0, 0, 1, 1]]\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 2018/6/11 下午6:45\n# @Author : zhanzecheng\n# @File : 542.01矩阵1.py\n# @Software: PyCharm\n\"\"\"\n\n# 一个简单的循环方式来解决这个问题\n# 这一题的思路不错,用多次循环来计数\n# TODO: check 1\nclass Solution:\n def updateMatrix(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n cur = 0\n col = len(matrix[0])\n row = len(matrix)\n while True:\n cur += 1\n flag = False\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == cur:\n if i - 1 < 0 or matrix[i - 1][j] >= cur:\n pass\n else:\n continue\n\n if j - 1 < 0 or matrix[i][j - 1] >= cur:\n pass\n else:\n continue\n\n if i + 1 >= row or matrix[i + 1][j] >= cur:\n pass\n else:\n continue\n\n if j + 1 >= col or matrix[i][j + 1] >= cur:\n pass\n else:\n continue\n flag = True\n matrix[i][j] += 1\n if not flag:\n break\n return matrix\n\nif __name__ == '__main__':\n solution = Solution()\n data = [\n [0, 0, 0, 0],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]\n ]\n print(solution.updateMatrix(data))\n data =[\n [1, 0, 1, 1, 0, 0, 1, 0, 0, 1],\n [0, 1, 1, 0, 1, 0, 1, 0, 1, 1],\n [0, 0, 1, 0, 1, 0, 0, 1, 0, 0],\n [1, 0, 1, 0, 1, 1, 1, 1, 1, 1],\n [0, 1, 0, 1, 1, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 1, 1, 1, 0, 1, 0],\n [0, 1, 0, 1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1, 1, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n [1, 1, 1, 1, 0, 1, 0, 0, 1, 1]\n ]\n\n result = [\n [1,0,1,1,0,0,1,0,0,1],\n [0,1,1,0,1,0,1,0,1,1],\n [0,0,1,0,1,0,0,1,0,0],\n [1,0,1,0,1,1,1,1,1,1],\n [0,1,0,1,1,0,0,0,0,1],\n [0,0,1,0,1,1,1,0,1,0],\n [0,1,0,1,0,1,0,0,1,1],\n [1,0,0,0,1,2,1,1,0,1],\n [2,1,1,1,1,1,1,0,1,0],\n [1,2,1,1,0,1,0,0,1,1]\n ]\n true_result = [\n [1,0,1,1,0,0,1,0,0,1],\n [0,1,1,0,1,0,1,0,1,1],\n [0,0,1,0,1,0,0,1,0,0],\n [1,0,1,0,1,1,1,1,1,1],\n [0,1,0,1,1,0,0,0,0,1],\n [0,0,1,0,1,1,1,0,1,0],\n [0,1,0,1,0,1,0,0,1,1],\n [1,0,0,0,1,2,1,1,0,1],\n [2,1,1,1,1,2,1,0,1,0],\n [3,2,2,1,0,1,0,0,1,1]\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding:utf-8 -*-
import time
from abc import ABCMeta, abstractmethod
from xlreportform.worksheet import WorkSheet
__author__ = "Andy Yang"
class Bases(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def set_style(self):
"""set workshet's style, indent,border,font,and so on"""
@abstractmethod
def query(self):
"""query from mysql, sqlserver"""
@abstractmethod
def clean(self):
"""clean data"""
@abstractmethod
def export(self):
"""export data"""
class ReportForm(Bases, WorkSheet):
def __init__(self, visible=False, filename=None, sheetname=None):
WorkSheet.__init__(self, visible, filename, sheetname)
def __new__(cls, *args, **kwargs):
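        # Runs the full report pipeline (query -> clean -> set_style -> export)
        # before the instance exists; note that each step receives the class
        # object itself rather than an instance.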
cls.query(cls)
cls.clean(cls)
cls.set_style(cls)
cls.export(cls)
return object.__new__(cls)
class DayRport(ReportForm):
    def query(self):
        print('query')
    def clean(self):
        # Implemented so instantiation works: `clean` is declared abstract on
        # Bases, and ABCMeta refuses to instantiate classes that omit it.
        print('clean')
    def set_style(self):
        print('set_style')
    def export(self):
        print('export')
if __name__ == '__main__':
d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')
time.sleep(5)
print(d)
|
normal
|
{
"blob_id": "092c6d637fe85136b4184d05f0ac7db17a8efb3b",
"index": 6087,
"step-1": "<mask token>\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n",
"step-3": "<mask token>\n__author__ = 'Andy Yang'\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n",
"step-4": "import time\nfrom abc import ABCMeta, abstractmethod\nfrom xlreportform.worksheet import WorkSheet\n__author__ = 'Andy Yang'\n\n\nclass Bases(metaclass=ABCMeta):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def set_style(self):\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\n\n @abstractmethod\n def query(self):\n \"\"\"query from mysql, sqlserver\"\"\"\n\n @abstractmethod\n def clean(self):\n \"\"\"clean data\"\"\"\n\n @abstractmethod\n def export(self):\n \"\"\"export data\"\"\"\n\n\nclass ReportForm(Bases, WorkSheet):\n\n def __init__(self, visible=False, filename=None, sheetname=None):\n WorkSheet.__init__(self, visible, filename, sheetname)\n\n def __new__(cls, *args, **kwargs):\n cls.query(cls)\n cls.clean(cls)\n cls.set_style(cls)\n cls.export(cls)\n return object.__new__(cls)\n\n\nclass DayRport(ReportForm):\n\n def query(self):\n print('query')\n\n def set_style(self):\n print('set_style')\n\n def export(self):\n print('export')\n\n\nif __name__ == '__main__':\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\n time.sleep(5)\n print(d)\n",
"step-5": "# -*- coding:utf-8 -*-\r\nimport time\r\nfrom abc import ABCMeta, abstractmethod\r\nfrom xlreportform.worksheet import WorkSheet\r\n\r\n__author__ = \"Andy Yang\"\r\n\r\n\r\nclass Bases(metaclass=ABCMeta):\r\n def __init__(self):\r\n pass\r\n\r\n @abstractmethod\r\n def set_style(self):\r\n \"\"\"set workshet's style, indent,border,font,and so on\"\"\"\r\n\r\n @abstractmethod\r\n def query(self):\r\n \"\"\"query from mysql, sqlserver\"\"\"\r\n\r\n @abstractmethod\r\n def clean(self):\r\n \"\"\"clean data\"\"\"\r\n\r\n @abstractmethod\r\n def export(self):\r\n \"\"\"export data\"\"\"\r\n\r\n\r\nclass ReportForm(Bases, WorkSheet):\r\n def __init__(self, visible=False, filename=None, sheetname=None):\r\n WorkSheet.__init__(self, visible, filename, sheetname)\r\n\r\n def __new__(cls, *args, **kwargs):\r\n cls.query(cls)\r\n cls.clean(cls)\r\n cls.set_style(cls)\r\n cls.export(cls)\r\n return object.__new__(cls)\r\n\r\n\r\nclass DayRport(ReportForm):\r\n def query(self):\r\n print('query')\r\n def set_style(self):\r\n print('set_style')\r\n def export(self):\r\n print('export')\r\n\r\n\r\nif __name__ == '__main__':\r\n d = DayRport(visible=True, filename='okok.xlsx', sheetname='dageda')\r\n time.sleep(5)\r\n print(d)",
"step-ids": [
13,
14,
15,
16,
17
]
}
|
[
13,
14,
15,
16,
17
] |
# encoding: utf-8
# module Revit.GeometryConversion calls itself GeometryConversion
# from RevitNodes,Version=1.2.1.3083,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class CurveUtils(object):
# no doc
@staticmethod
def CurvesAreSimilar(a,b):
"""
CurvesAreSimilar(a: Curve,b: Curve) -> bool
This method uses basic checks to compare curves for similarity.
It
starts by comparing the curves' end points. Curves which have similar
end points but different directions will not be regarded as similar,
because directionality is important in Revit for other purposes.
Depending on the curve type,other comparisons are then performed.
a: The first curve.
b: The second curve.
Returns: Returns true if the curves are similar within Tolerance,and
false if they are not.
"""
pass
@staticmethod
def GetPlaneFromCurve(c,planarOnly):
""" GetPlaneFromCurve(c: Curve,planarOnly: bool) -> Plane """
pass
@staticmethod
def IsLineLike(crv):
""" IsLineLike(crv: Curve) -> bool """
pass
@staticmethod
def PointArraysAreSame(pnts1,pnts2):
""" PointArraysAreSame(pnts1: ReferencePointArray,pnts2: ReferencePointArray) -> bool """
pass
@staticmethod
def ReferencePointsAreSame(pnt1,pnt2):
""" ReferencePointsAreSame(pnt1: ReferencePoint,pnt2: ReferencePoint) -> bool """
pass
    Tolerance = 1e-06
__all__=[
'CurvesAreSimilar',
'GetPlaneFromCurve',
'IsLineLike',
'PointArraysAreSame',
'ReferencePointsAreSame',
'Tolerance',
]
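

# Hedged sketch (not the Revit implementation): the endpoint-based similarity
# check described in the CurvesAreSimilar docstring above, for curves reduced
# to (start_point, end_point) tuples of coordinate tuples.
def _points_close(p, q, tol=1e-06):
    return all(abs(a - b) <= tol for a, b in zip(p, q))

def curves_are_similar(a, b, tol=1e-06):
    # Start must match start and end must match end: reversed curves are
    # deliberately not treated as similar, since direction matters in Revit.
    return _points_close(a[0], b[0], tol) and _points_close(a[1], b[1], tol)

# e.g. curves_are_similar(((0, 0), (1, 0)), ((0, 0), (1, 1e-09))) -> True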
class DynamoToRevitBRep(object):
# no doc
@staticmethod
def ToRevitType(*__args):
"""
ToRevitType(surf: Surface,performHostUnitConversion: bool,materialId: ElementId) -> GeometryObject
this method attempts to construct a BRep from a surface.
ToRevitType(sol: Solid,performHostUnitConversion: bool,materialId: ElementId) -> GeometryObject
this method attempts to construct a BRep from a closed solid.
"""
pass
__all__=[
'ToRevitType',
]
class GeometryObjectConverter(object):
# no doc
@staticmethod
def Convert(geom,reference,transform):
"""
Convert(geom: GeometryObject,reference: Reference,transform: CoordinateSystem) -> object
Convert a GeometryObject to an applicable ProtoGeometry type.
Returns: A Geometry type. Null if there's no suitable conversion.
"""
pass
@staticmethod
def ConvertToMany(solid,reference,transform):
"""
ConvertToMany(solid: Solid,reference: Reference,transform: CoordinateSystem) -> IEnumerable[object]
Get the edges and faces from the solid and convert them
"""
pass
@staticmethod
def InternalConvert(geom):
""" InternalConvert(geom: PolyLine) -> PolyCurve """
pass
__all__=[
'Convert',
'ConvertToMany',
'InternalConvert',
]
class GeometryPrimitiveConverter(object):
# no doc
@staticmethod
def GetPerpendicular(*__args):
"""
GetPerpendicular(vector: Vector) -> Vector
GetPerpendicular(xyz: XYZ) -> XYZ
"""
pass
@staticmethod
def ToCoordinateSystem(t,convertUnits):
""" ToCoordinateSystem(t: Transform,convertUnits: bool) -> CoordinateSystem """
pass
@staticmethod
def ToDegrees(degrees):
""" ToDegrees(degrees: float) -> float """
pass
@staticmethod
def ToDoubleArray(list):
""" ToDoubleArray(list: Array[float]) -> DoubleArray """
pass
@staticmethod
def ToPlane(plane,convertUnits):
"""
ToPlane(plane: Plane,convertUnits: bool) -> Plane
ToPlane(plane: Plane,convertUnits: bool) -> Plane
"""
pass
@staticmethod
def ToPoint(xyz,convertUnits):
""" ToPoint(xyz: XYZ,convertUnits: bool) -> Point """
pass
@staticmethod
def ToPoints(list,convertUnits):
""" ToPoints(list: List[XYZ],convertUnits: bool) -> List[Point] """
pass
@staticmethod
def ToProtoType(*__args):
"""
ToProtoType(uv: UV) -> UV
ToProtoType(point: Point,convertUnits: bool) -> Point
ToProtoType(xyz: BoundingBoxXYZ,convertUnits: bool) -> BoundingBox
"""
pass
@staticmethod
def ToRadians(degrees):
""" ToRadians(degrees: float) -> float """
pass
@staticmethod
def ToRevitBoundingBox(cs,minPoint,maxPoint,convertUnits):
""" ToRevitBoundingBox(cs: CoordinateSystem,minPoint: Point,maxPoint: Point,convertUnits: bool) -> BoundingBoxXYZ """
pass
@staticmethod
def ToRevitType(*__args):
"""
ToRevitType(vec: Vector,convertUnits: bool) -> XYZ
ToRevitType(pt: Point,convertUnits: bool) -> XYZ
ToRevitType(bb: BoundingBox,convertUnits: bool) -> BoundingBoxXYZ
"""
pass
@staticmethod
def ToTransform(cs,convertUnits):
""" ToTransform(cs: CoordinateSystem,convertUnits: bool) -> Transform """
pass
@staticmethod
def ToVector(xyz,convertUnits):
""" ToVector(xyz: XYZ,convertUnits: bool) -> Vector """
pass
@staticmethod
def ToXyz(*__args):
"""
ToXyz(vec: Vector,convertUnits: bool) -> XYZ
ToXyz(pt: Point,convertUnits: bool) -> XYZ
"""
pass
@staticmethod
def ToXyzs(list,convertUnits):
"""
ToXyzs(list: Array[Vector],convertUnits: bool) -> Array[XYZ]
ToXyzs(list: Array[Point],convertUnits: bool) -> Array[XYZ]
ToXyzs(list: List[Point],convertUnits: bool) -> List[XYZ]
"""
pass
__all__=[
'GetPerpendicular',
'ToCoordinateSystem',
'ToDegrees',
'ToDoubleArray',
'ToPlane',
'ToPoint',
'ToPoints',
'ToProtoType',
'ToRadians',
'ToRevitBoundingBox',
'ToRevitType',
'ToTransform',
'ToVector',
'ToXyz',
'ToXyzs',
]
class NurbsUtils(object):
# no doc
@staticmethod
def ElevateBezierDegree(crv,finalDegree):
"""
ElevateBezierDegree(crv: NurbsCurve,finalDegree: int) -> NurbsCurve
        Elevate the degree of a Bezier curve (represented in NURBS form) to a
        given degree without changing the shape.
crv: The curve
finalDegree: The requested degree
"""
pass
__all__=[
'ElevateBezierDegree',
]
class PolygonContainment(object):
# no doc
@staticmethod
def AdjustDelta(delta,vertex,next_vertex,p):
""" AdjustDelta(delta: int,vertex: UV,next_vertex: UV,p: UV) -> int """
pass
@staticmethod
def GetQuadrant(vertex,p):
"""
GetQuadrant(vertex: UV,p: UV) -> int
        Determine the quadrant of a polygon vertex relative to the test point.
"""
pass
@staticmethod
def GetXIntercept(p,q,y):
"""
GetXIntercept(p: UV,q: UV,y: float) -> float
        Determine the X intercept of a polygon edge with a horizontal line at
        the Y value of the test point.
"""
pass
@staticmethod
def PolygonContains(polygon,point):
""" PolygonContains(polygon: List[UV],point: UV) -> bool """
pass
__all__=[
'AdjustDelta',
'GetQuadrant',
'GetXIntercept',
'PolygonContains',
]
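

# Hedged illustration (not part of the generated stub): the quadrant/winding
# point-in-polygon test that PolygonContainment documents, sketched in plain
# Python with (x, y) tuples standing in for Revit UV objects.
def _quadrant(vertex, p):
    # Quadrant (0..3, counter-clockwise) of `vertex` relative to point `p`.
    if vertex[0] > p[0]:
        return 0 if vertex[1] > p[1] else 3
    return 1 if vertex[1] > p[1] else 2

def _x_intercept(p, q, y):
    # X coordinate where the edge p-q crosses the horizontal line at height y.
    return p[0] + (y - p[1]) * (q[0] - p[0]) / (q[1] - p[1])

def polygon_contains(polygon, point):
    # Accumulate quadrant deltas while walking the polygon; a full turn
    # (total of +/-4) means `point` is enclosed.
    angle = 0
    for i, vertex in enumerate(polygon):
        next_vertex = polygon[(i + 1) % len(polygon)]
        delta = _quadrant(next_vertex, point) - _quadrant(vertex, point)
        if delta == 3:
            delta = -1
        elif delta == -3:
            delta = 1
        elif delta in (2, -2):
            # The edge crosses the horizontal through `point`; which side it
            # crosses on decides the sign.
            if _x_intercept(vertex, next_vertex, point[1]) > point[0]:
                delta = -delta
        angle += delta
    return abs(angle) == 4

# e.g. polygon_contains([(0, 0), (4, 0), (4, 4), (0, 4)], (2, 2)) -> True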
class ProtoToRevitCurve(object):
# no doc
@staticmethod
def ToRevitType(*__args):
"""
ToRevitType(pcrv: PolyCurve,performHostUnitConversion: bool) -> CurveLoop
ToRevitType(crv: Curve,performHostUnitConversion: bool) -> Curve
"""
pass
__all__=[
'ToRevitType',
]
class ProtoToRevitMesh(object):
# no doc
@staticmethod
def CreateBoundingBoxMeshForErrors(minPoint,maxPoint,performHostUnitConversion):
"""
CreateBoundingBoxMeshForErrors(minPoint: Point,maxPoint: Point,performHostUnitConversion: bool) -> IList[GeometryObject]
This is to create a bounding box mesh for geometries which have errors during
the tessellating process
"""
pass
@staticmethod
def ToRevitType(*__args):
"""
ToRevitType(mesh: Mesh,target: TessellatedShapeBuilderTarget,fallback: TessellatedShapeBuilderFallback,MaterialId: ElementId,performHostUnitConversion: bool) -> IList[GeometryObject]
ToRevitType(solid: Solid,target: TessellatedShapeBuilderTarget,fallback: TessellatedShapeBuilderFallback,MaterialId: ElementId,performHostUnitConversion: bool) -> IList[GeometryObject]
ToRevitType(srf: Surface,target: TessellatedShapeBuilderTarget,fallback: TessellatedShapeBuilderFallback,MaterialId: ElementId,performHostUnitConversion: bool) -> IList[GeometryObject]
"""
pass
__all__=[
'CreateBoundingBoxMeshForErrors',
'ToRevitType',
]
class RevitToProtoCurve(object):
# no doc
@staticmethod
def ToProtoType(*__args):
"""
ToProtoType(geom: PolyLine,performHostUnitConversion: bool) -> PolyCurve
ToProtoType(revitCurves: CurveArray,performHostUnitConversion: bool) -> PolyCurve
ToProtoType(revitCurve: Curve,performHostUnitConversion: bool,referenceOverride: Reference) -> Curve
"""
pass
__all__=[
'ToProtoType',
]
class RevitToProtoFace(object):
# no doc
@staticmethod
def ToProtoType(revitFace,performHostUnitConversion,referenceOverride):
""" ToProtoType(revitFace: Face,performHostUnitConversion: bool,referenceOverride: Reference) -> IEnumerable[Surface] """
pass
__all__=[
'ToProtoType',
]
class RevitToProtoMesh(object):
# no doc
@staticmethod
def ToProtoType(*__args):
"""
ToProtoType(meshArray: IEnumerable[Mesh],performHostUnitConversion: bool) -> Array[Mesh]
ToProtoType(mesh: Mesh,performHostUnitConversion: bool) -> Mesh
"""
pass
__all__=[
'ToProtoType',
]
class RevitToProtoSolid(object):
# no doc
@staticmethod
def ToProtoType(solid,performHostUnitConversion):
""" ToProtoType(solid: Solid,performHostUnitConversion: bool) -> Solid """
pass
__all__=[
'ToProtoType',
]
class SurfaceExtractor(object):
"""
This class is required to extract the underlying surface representation from a Revit Face.
All Face types are supported.
"""
@staticmethod
def ExtractSurface(face,edgeLoops):
"""
ExtractSurface(face: HermiteFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface
ExtractSurface(face: RevolvedFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface
ExtractSurface(face: RuledFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface
ExtractSurface(face: PlanarFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface
ExtractSurface(face: CylindricalFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface
ExtractSurface(face: ConicalFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface
"""
pass
__all__=[
'ExtractSurface',
]
class UnitConverter(object):
# no doc
@staticmethod
def ConvertToDynamoUnits(geometry):
        # Error generating skeleton for function ConvertToDynamoUnits: Method must be called on a Type for which Type.IsGenericParameter is false.
        pass
@staticmethod
def ConvertToHostUnits(geometry):
        # Error generating skeleton for function ConvertToHostUnits: Method must be called on a Type for which Type.IsGenericParameter is false.
        pass
@staticmethod
def DynamoToHostFactor(unitType):
""" DynamoToHostFactor(unitType: UnitType) -> float """
pass
@staticmethod
def HostToDynamoFactor(unitType):
""" HostToDynamoFactor(unitType: UnitType) -> float """
pass
@staticmethod
def InDynamoUnits(geometry):
        # Error generating skeleton for function InDynamoUnits: Method must be called on a Type for which Type.IsGenericParameter is false.
        pass
@staticmethod
def InHostUnits(geometry):
        # Error generating skeleton for function InHostUnits: Method must be called on a Type for which Type.IsGenericParameter is false.
        pass
__all__=[
'ConvertToDynamoUnits',
'ConvertToHostUnits',
'DynamoToHostFactor',
'HostToDynamoFactor',
'InDynamoUnits',
'InHostUnits',
]
|
normal
|
{
"blob_id": "f5ca2fb2ce8bcb7a67abe3123d4c50949e9c2f2f",
"index": 2029,
"step-1": "# encoding: utf-8\r\n# module Revit.GeometryConversion calls itself GeometryConversion\r\n# from RevitNodes,Version=1.2.1.3083,Culture=neutral,PublicKeyToken=null\r\n# by generator 1.145\r\n# no doc\r\n# no imports\r\n\r\n# no functions\r\n# classes\r\n\r\nclass CurveUtils(object):\r\n # no doc\r\n @staticmethod\r\n def CurvesAreSimilar(a,b):\r\n \"\"\"\r\n CurvesAreSimilar(a: Curve,b: Curve) -> bool\r\n\r\n \r\n\r\n This method uses basic checks to compare curves for similarity.\r\n\r\n It \r\n\r\n starts by comparing the curves' end points. Curves which have similar\r\n\r\n \r\n\r\n end points but different directions will not be regarded as similar,\r\n\r\n \r\n\r\n because directionality is important in Revit for other purposes. \r\n\r\n \r\n\r\n Depending on the curve type,other comparisons are then performed.\r\n\r\n \r\n\r\n \r\n\r\n a: The first curve.\r\n\r\n b: The second curve.\r\n\r\n Returns: Returns true if the curves are similar within Tolerance,and \r\n\r\n \r\n\r\n false if they are not.\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def GetPlaneFromCurve(c,planarOnly):\r\n \"\"\" GetPlaneFromCurve(c: Curve,planarOnly: bool) -> Plane \"\"\"\r\n pass\r\n @staticmethod\r\n def IsLineLike(crv):\r\n \"\"\" IsLineLike(crv: Curve) -> bool \"\"\"\r\n pass\r\n @staticmethod\r\n def PointArraysAreSame(pnts1,pnts2):\r\n \"\"\" PointArraysAreSame(pnts1: ReferencePointArray,pnts2: ReferencePointArray) -> bool \"\"\"\r\n pass\r\n @staticmethod\r\n def ReferencePointsAreSame(pnt1,pnt2):\r\n \"\"\" ReferencePointsAreSame(pnt1: ReferencePoint,pnt2: ReferencePoint) -> bool \"\"\"\r\n pass\r\n Tolerance=9.9999999999999995e-07\r\n __all__=[\r\n 'CurvesAreSimilar',\r\n 'GetPlaneFromCurve',\r\n 'IsLineLike',\r\n 'PointArraysAreSame',\r\n 'ReferencePointsAreSame',\r\n 'Tolerance',\r\n ]\r\n\r\n\r\nclass DynamoToRevitBRep(object):\r\n # no doc\r\n @staticmethod\r\n def ToRevitType(*__args):\r\n \"\"\"\r\n ToRevitType(surf: Surface,performHostUnitConversion: bool,materialId: ElementId) -> GeometryObject\r\n\r\n \r\n\r\n this method attempts to construct a BRep from a surface.\r\n\r\n ToRevitType(sol: Solid,performHostUnitConversion: bool,materialId: ElementId) -> GeometryObject\r\n\r\n \r\n\r\n this method attempts to construct a BRep from a closed solid.\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'ToRevitType',\r\n ]\r\n\r\n\r\nclass GeometryObjectConverter(object):\r\n # no doc\r\n @staticmethod\r\n def Convert(geom,reference,transform):\r\n \"\"\"\r\n Convert(geom: GeometryObject,reference: Reference,transform: CoordinateSystem) -> object\r\n\r\n \r\n\r\n Convert a GeometryObject to an applicable ProtoGeometry type.\r\n\r\n Returns: A Geometry type. 
Null if there's no suitable conversion.\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def ConvertToMany(solid,reference,transform):\r\n \"\"\"\r\n ConvertToMany(solid: Solid,reference: Reference,transform: CoordinateSystem) -> IEnumerable[object]\r\n\r\n \r\n\r\n Get the edges and faces from the solid and convert them\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def InternalConvert(geom):\r\n \"\"\" InternalConvert(geom: PolyLine) -> PolyCurve \"\"\"\r\n pass\r\n __all__=[\r\n 'Convert',\r\n 'ConvertToMany',\r\n 'InternalConvert',\r\n ]\r\n\r\n\r\nclass GeometryPrimitiveConverter(object):\r\n # no doc\r\n @staticmethod\r\n def GetPerpendicular(*__args):\r\n \"\"\"\r\n GetPerpendicular(vector: Vector) -> Vector\r\n\r\n GetPerpendicular(xyz: XYZ) -> XYZ\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def ToCoordinateSystem(t,convertUnits):\r\n \"\"\" ToCoordinateSystem(t: Transform,convertUnits: bool) -> CoordinateSystem \"\"\"\r\n pass\r\n @staticmethod\r\n def ToDegrees(degrees):\r\n \"\"\" ToDegrees(degrees: float) -> float \"\"\"\r\n pass\r\n @staticmethod\r\n def ToDoubleArray(list):\r\n \"\"\" ToDoubleArray(list: Array[float]) -> DoubleArray \"\"\"\r\n pass\r\n @staticmethod\r\n def ToPlane(plane,convertUnits):\r\n \"\"\"\r\n ToPlane(plane: Plane,convertUnits: bool) -> Plane\r\n\r\n ToPlane(plane: Plane,convertUnits: bool) -> Plane\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def ToPoint(xyz,convertUnits):\r\n \"\"\" ToPoint(xyz: XYZ,convertUnits: bool) -> Point \"\"\"\r\n pass\r\n @staticmethod\r\n def ToPoints(list,convertUnits):\r\n \"\"\" ToPoints(list: List[XYZ],convertUnits: bool) -> List[Point] \"\"\"\r\n pass\r\n @staticmethod\r\n def ToProtoType(*__args):\r\n \"\"\"\r\n ToProtoType(uv: UV) -> UV\r\n\r\n ToProtoType(point: Point,convertUnits: bool) -> Point\r\n\r\n ToProtoType(xyz: BoundingBoxXYZ,convertUnits: bool) -> BoundingBox\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def ToRadians(degrees):\r\n \"\"\" ToRadians(degrees: float) -> float \"\"\"\r\n pass\r\n @staticmethod\r\n def ToRevitBoundingBox(cs,minPoint,maxPoint,convertUnits):\r\n \"\"\" ToRevitBoundingBox(cs: CoordinateSystem,minPoint: Point,maxPoint: Point,convertUnits: bool) -> BoundingBoxXYZ \"\"\"\r\n pass\r\n @staticmethod\r\n def ToRevitType(*__args):\r\n \"\"\"\r\n ToRevitType(vec: Vector,convertUnits: bool) -> XYZ\r\n\r\n ToRevitType(pt: Point,convertUnits: bool) -> XYZ\r\n\r\n ToRevitType(bb: BoundingBox,convertUnits: bool) -> BoundingBoxXYZ\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def ToTransform(cs,convertUnits):\r\n \"\"\" ToTransform(cs: CoordinateSystem,convertUnits: bool) -> Transform \"\"\"\r\n pass\r\n @staticmethod\r\n def ToVector(xyz,convertUnits):\r\n \"\"\" ToVector(xyz: XYZ,convertUnits: bool) -> Vector \"\"\"\r\n pass\r\n @staticmethod\r\n def ToXyz(*__args):\r\n \"\"\"\r\n ToXyz(vec: Vector,convertUnits: bool) -> XYZ\r\n\r\n ToXyz(pt: Point,convertUnits: bool) -> XYZ\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def ToXyzs(list,convertUnits):\r\n \"\"\"\r\n ToXyzs(list: Array[Vector],convertUnits: bool) -> Array[XYZ]\r\n\r\n ToXyzs(list: Array[Point],convertUnits: bool) -> Array[XYZ]\r\n\r\n ToXyzs(list: List[Point],convertUnits: bool) -> List[XYZ]\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'GetPerpendicular',\r\n 'ToCoordinateSystem',\r\n 'ToDegrees',\r\n 'ToDoubleArray',\r\n 'ToPlane',\r\n 'ToPoint',\r\n 'ToPoints',\r\n 'ToProtoType',\r\n 'ToRadians',\r\n 'ToRevitBoundingBox',\r\n 'ToRevitType',\r\n 'ToTransform',\r\n 'ToVector',\r\n 'ToXyz',\r\n 'ToXyzs',\r\n ]\r\n\r\n\r\nclass NurbsUtils(object):\r\n # no doc\r\n 
@staticmethod\r\n def ElevateBezierDegree(crv,finalDegree):\r\n \"\"\"\r\n ElevateBezierDegree(crv: NurbsCurve,finalDegree: int) -> NurbsCurve\r\n\r\n \r\n\r\n Elevate the degree of a Bezier curve (represented in NURBS form) to a given \r\n\r\n degree\r\n\r\n without changing the shape\r\n\r\n \r\n\r\n \r\n\r\n crv: The curve\r\n\r\n finalDegree: The requested degree\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'ElevateBezierDegree',\r\n ]\r\n\r\n\r\nclass PolygonContainment(object):\r\n # no doc\r\n @staticmethod\r\n def AdjustDelta(delta,vertex,next_vertex,p):\r\n \"\"\" AdjustDelta(delta: int,vertex: UV,next_vertex: UV,p: UV) -> int \"\"\"\r\n pass\r\n @staticmethod\r\n def GetQuadrant(vertex,p):\r\n \"\"\"\r\n GetQuadrant(vertex: UV,p: UV) -> int\r\n\r\n \r\n\r\n Determine the quadrant of a polygon vertex \r\n\r\n relative to the test \r\n\r\n point.\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def GetXIntercept(p,q,y):\r\n \"\"\"\r\n GetXIntercept(p: UV,q: UV,y: float) -> float\r\n\r\n \r\n\r\n Determine the X intercept of a polygon edge \r\n\r\n with a horizontal \r\n\r\n line at the Y value of the \r\n\r\n test point.\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def PolygonContains(polygon,point):\r\n \"\"\" PolygonContains(polygon: List[UV],point: UV) -> bool \"\"\"\r\n pass\r\n __all__=[\r\n 'AdjustDelta',\r\n 'GetQuadrant',\r\n 'GetXIntercept',\r\n 'PolygonContains',\r\n ]\r\n\r\n\r\nclass ProtoToRevitCurve(object):\r\n # no doc\r\n @staticmethod\r\n def ToRevitType(*__args):\r\n \"\"\"\r\n ToRevitType(pcrv: PolyCurve,performHostUnitConversion: bool) -> CurveLoop\r\n\r\n ToRevitType(crv: Curve,performHostUnitConversion: bool) -> Curve\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'ToRevitType',\r\n ]\r\n\r\n\r\nclass ProtoToRevitMesh(object):\r\n # no doc\r\n @staticmethod\r\n def CreateBoundingBoxMeshForErrors(minPoint,maxPoint,performHostUnitConversion):\r\n \"\"\"\r\n CreateBoundingBoxMeshForErrors(minPoint: Point,maxPoint: Point,performHostUnitConversion: bool) -> IList[GeometryObject]\r\n\r\n \r\n\r\n This is to create a bounding box mesh for geometries which have errors during \r\n\r\n the tessellating process\r\n \"\"\"\r\n pass\r\n @staticmethod\r\n def ToRevitType(*__args):\r\n \"\"\"\r\n ToRevitType(mesh: Mesh,target: TessellatedShapeBuilderTarget,fallback: TessellatedShapeBuilderFallback,MaterialId: ElementId,performHostUnitConversion: bool) -> IList[GeometryObject]\r\n\r\n ToRevitType(solid: Solid,target: TessellatedShapeBuilderTarget,fallback: TessellatedShapeBuilderFallback,MaterialId: ElementId,performHostUnitConversion: bool) -> IList[GeometryObject]\r\n\r\n ToRevitType(srf: Surface,target: TessellatedShapeBuilderTarget,fallback: TessellatedShapeBuilderFallback,MaterialId: ElementId,performHostUnitConversion: bool) -> IList[GeometryObject]\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'CreateBoundingBoxMeshForErrors',\r\n 'ToRevitType',\r\n ]\r\n\r\n\r\nclass RevitToProtoCurve(object):\r\n # no doc\r\n @staticmethod\r\n def ToProtoType(*__args):\r\n \"\"\"\r\n ToProtoType(geom: PolyLine,performHostUnitConversion: bool) -> PolyCurve\r\n\r\n ToProtoType(revitCurves: CurveArray,performHostUnitConversion: bool) -> PolyCurve\r\n\r\n ToProtoType(revitCurve: Curve,performHostUnitConversion: bool,referenceOverride: Reference) -> Curve\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'ToProtoType',\r\n ]\r\n\r\n\r\nclass RevitToProtoFace(object):\r\n # no doc\r\n @staticmethod\r\n def ToProtoType(revitFace,performHostUnitConversion,referenceOverride):\r\n \"\"\" ToProtoType(revitFace: 
Face,performHostUnitConversion: bool,referenceOverride: Reference) -> IEnumerable[Surface] \"\"\"\r\n pass\r\n __all__=[\r\n 'ToProtoType',\r\n ]\r\n\r\n\r\nclass RevitToProtoMesh(object):\r\n # no doc\r\n @staticmethod\r\n def ToProtoType(*__args):\r\n \"\"\"\r\n ToProtoType(meshArray: IEnumerable[Mesh],performHostUnitConversion: bool) -> Array[Mesh]\r\n\r\n ToProtoType(mesh: Mesh,performHostUnitConversion: bool) -> Mesh\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'ToProtoType',\r\n ]\r\n\r\n\r\nclass RevitToProtoSolid(object):\r\n # no doc\r\n @staticmethod\r\n def ToProtoType(solid,performHostUnitConversion):\r\n \"\"\" ToProtoType(solid: Solid,performHostUnitConversion: bool) -> Solid \"\"\"\r\n pass\r\n __all__=[\r\n 'ToProtoType',\r\n ]\r\n\r\n\r\nclass SurfaceExtractor(object):\r\n \"\"\"\r\n This class is required to extract the underlying surface representation from a Revit Face.\r\n\r\n All Face types are supported.\r\n \"\"\"\r\n @staticmethod\r\n def ExtractSurface(face,edgeLoops):\r\n \"\"\"\r\n ExtractSurface(face: HermiteFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface\r\n\r\n ExtractSurface(face: RevolvedFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface\r\n\r\n ExtractSurface(face: RuledFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface\r\n\r\n ExtractSurface(face: PlanarFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface\r\n\r\n ExtractSurface(face: CylindricalFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface\r\n\r\n ExtractSurface(face: ConicalFace,edgeLoops: IEnumerable[PolyCurve]) -> Surface\r\n \"\"\"\r\n pass\r\n __all__=[\r\n 'ExtractSurface',\r\n ]\r\n\r\n\r\nclass UnitConverter(object):\r\n # no doc\r\n @staticmethod\r\n def ConvertToDynamoUnits(geometry):\r\n# Error generating skeleton for function ConvertToDynamoUnits: Method must be called on a Type for which Type.IsGenericParameter is false.\r\n\r\n @staticmethod\r\n def ConvertToHostUnits(geometry):\r\n# Error generating skeleton for function ConvertToHostUnits: Method must be called on a Type for which Type.IsGenericParameter is false.\r\n\r\n @staticmethod\r\n def DynamoToHostFactor(unitType):\r\n \"\"\" DynamoToHostFactor(unitType: UnitType) -> float \"\"\"\r\n pass\r\n @staticmethod\r\n def HostToDynamoFactor(unitType):\r\n \"\"\" HostToDynamoFactor(unitType: UnitType) -> float \"\"\"\r\n pass\r\n @staticmethod\r\n def InDynamoUnits(geometry):\r\n# Error generating skeleton for function InDynamoUnits: Method must be called on a Type for which Type.IsGenericParameter is false.\r\n\r\n @staticmethod\r\n def InHostUnits(geometry):\r\n# Error generating skeleton for function InHostUnits: Method must be called on a Type for which Type.IsGenericParameter is false.\r\n\r\n __all__=[\r\n 'ConvertToDynamoUnits',\r\n 'ConvertToHostUnits',\r\n 'DynamoToHostFactor',\r\n 'HostToDynamoFactor',\r\n 'InDynamoUnits',\r\n 'InHostUnits',\r\n ]\r\n\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('barriers', '0011_auto_20170904_1658')]
operations = [migrations.CreateModel(name='BarrierCountry', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100,
verbose_name='Country or Territory Name')), ('code', models.
CharField(blank=True, max_length=100, null=True, verbose_name=
'Country or Territory Code')), ('official_name', models.CharField(
blank=True, max_length=100, null=True, verbose_name=
'Offical Country or Territory name')), ('govuk_index_entry_code',
models.CharField(blank=True, max_length=10, null=True, verbose_name
='GOV.UK index code')), ('country_or_territory', models.CharField(
choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO',
max_length=2, verbose_name='Country or Territory flag'))], options=
{'verbose_name_plural': 'countries or territories'}), migrations.
CreateModel(name='BarrierNotification', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'title', models.TextField(blank=True, verbose_name='Title')), (
'description', models.TextField(blank=True, verbose_name=
'Description')), ('distribution_date', models.DateField(blank=True,
null=True, verbose_name='Distribution Date')), ('barrier_symbol',
models.CharField(blank=True, max_length=500, verbose_name=
'Barrier Symbol')), ('core_symbol', models.CharField(blank=True,
max_length=500, verbose_name='Core Symbol')), ('mab_type', models.
CharField(blank=True, max_length=500, verbose_name='Barrier type')),
('products_text', models.TextField(blank=True, verbose_name=
'Products')), ('product_codes', models.TextField(blank=True,
verbose_name='Product codes')), ('objectives', models.TextField(
blank=True, verbose_name='Objectives')), ('keywords', models.
TextField(blank=True, verbose_name='Keywords')), (
'regions_affected', models.TextField(blank=True, verbose_name=
'Regions affected')), ('comments_due_date', models.DateField(blank=
True, null=True, verbose_name='Final date for comments')), (
'notification_type', models.CharField(blank=True, max_length=50,
verbose_name='Notification type')), ('document_link', models.
CharField(blank=True, max_length=1500, verbose_name='Document link'
)), ('external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierRecord', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Active', 'Active'
)], default=None, max_length=10, null=True)), ('title', models.
TextField(blank=True, verbose_name='Title')), ('description',
models.TextField(blank=True, verbose_name='Description')), (
'products_text', models.TextField(blank=True, verbose_name=
'Products affected')), ('sectors_text', models.TextField(blank=True,
verbose_name='Sectors affected')), ('source_id', models.CharField(
blank=True, max_length=20, null=True, verbose_name=
'ID in source system')), ('distribution_date', models.DateField(
blank=True, null=True, verbose_name='Distribution Date')), (
'external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReport', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Draft', 'Draft'),
('Submitted', 'Submitted')], default=None, max_length=10, null=True
)), ('name', models.CharField(blank=True, max_length=200, null=True
)), ('problem_description', models.TextField(blank=True, null=True)
), ('product_text', models.TextField(blank=True, null=True)), (
'product_code', models.CharField(blank=True, max_length=500, null=
True)), ('business_impact_description', models.TextField(blank=True,
null=True)), ('problem_duration_description', models.TextField(
blank=True, null=True)), ('other_companies_affected_choice', models
.CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), (
'DontKnow', "Don't know")], default=None, max_length=10, null=True)
), ('other_countries_affected_description', models.TextField(blank=
True, null=True)), ('steps_taken_to_resolve', models.TextField(
blank=True, null=True)), ('outcome_looking_for', models.TextField(
blank=True, null=True)), ('support_desired_choice', models.
CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE',
'None - this is for your information only'), (
'SUPPORT_DESIRED_LOCAL',
'Local engagement only with UK Government officials in the country I am trying to export to'
), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'),
('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length
=10, null=True)), ('confidentiality_issues_description', models.
TextField(blank=True, null=True)), ('happy_to_publish_choice',
models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES',
'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE',
'Maybe, following consultation with me')], default=None, max_length
=10, null=True)), ('any_other_details_description', models.
TextField(blank=True, null=True)), ('country', models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='barriers.BarrierCountry'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReporter', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'name', models.CharField(blank=True, max_length=1500, verbose_name=
'Reporter name')), ('company', models.CharField(blank=True,
max_length=1500, verbose_name='Company name'))], options={
'abstract': False}), migrations.CreateModel(name='BarrierSource',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100)), (
'description', models.CharField(blank=True, max_length=500, null=
True)), ('short_name', models.CharField(blank=True, max_length=20,
null=True)), ('remote_url', models.URLField(blank=True, max_length=
20, null=True))], options={'abstract': False}), migrations.
CreateModel(name='BarrierTypeMapping', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'destination_barrier_list', models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='destination_barrier_list',
to='barriers.BarrierSource'))], options={'abstract': False}),
migrations.RemoveField(model_name='marketaccessbarrier', name=
'barrier_types'), migrations.RenameField(model_name='barriertype',
old_name='ec_barrier_code', new_name='barrier_code'), migrations.
AlterField(model_name='barriertype', name='name', field=models.
CharField(max_length=200)), migrations.DeleteModel(name=
'MarketAccessBarrier'), migrations.AddField(model_name=
'barriertypemapping', name='destination_barrier_type', field=models
.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='destination_barrier_type', to='barriers.BarrierType')
), migrations.AddField(model_name='barriertypemapping', name=
'source_barrier_list', field=models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='source_barrier_list', to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriertypemapping', name='source_barrier_type', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='source_barrier_type', to='barriers.BarrierType')),
migrations.AddField(model_name='barrierreport', name='reporter',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.
models.deletion.CASCADE, to='barriers.BarrierReporter')),
migrations.AddField(model_name='barrierreport', name=
'top_level_barrier_type', field=models.ForeignKey(blank=True, null=
True, on_delete=django.db.models.deletion.CASCADE, related_name=
'barrier_reports', to='barriers.BarrierType')), migrations.AddField
(model_name='barrierrecord', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barrierrecord', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name='types',
to='barriers.BarrierType')), migrations.AddField(model_name=
'barrierrecord', name='country', field=models.ForeignKey(blank=True,
null=True, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriernotification', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriernotification', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name=
'barrier_types', to='barriers.BarrierType')), migrations.AddField(
model_name='barriernotification', name='country', field=models.
ForeignKey(blank=True, null=True, on_delete=django.db.models.
deletion.CASCADE, related_name='notification_countries', to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriertype', name='barrier_source', field=models.ForeignKey(
default=1, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [('barriers', '0011_auto_20170904_1658')]
operations = [migrations.CreateModel(name='BarrierCountry', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100,
verbose_name='Country or Territory Name')), ('code', models.
CharField(blank=True, max_length=100, null=True, verbose_name=
'Country or Territory Code')), ('official_name', models.CharField(
blank=True, max_length=100, null=True, verbose_name=
'Offical Country or Territory name')), ('govuk_index_entry_code',
models.CharField(blank=True, max_length=10, null=True, verbose_name
='GOV.UK index code')), ('country_or_territory', models.CharField(
choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO',
max_length=2, verbose_name='Country or Territory flag'))], options=
{'verbose_name_plural': 'countries or territories'}), migrations.
CreateModel(name='BarrierNotification', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'title', models.TextField(blank=True, verbose_name='Title')), (
'description', models.TextField(blank=True, verbose_name=
'Description')), ('distribution_date', models.DateField(blank=True,
null=True, verbose_name='Distribution Date')), ('barrier_symbol',
models.CharField(blank=True, max_length=500, verbose_name=
'Barrier Symbol')), ('core_symbol', models.CharField(blank=True,
max_length=500, verbose_name='Core Symbol')), ('mab_type', models.
CharField(blank=True, max_length=500, verbose_name='Barrier type')),
('products_text', models.TextField(blank=True, verbose_name=
'Products')), ('product_codes', models.TextField(blank=True,
verbose_name='Product codes')), ('objectives', models.TextField(
blank=True, verbose_name='Objectives')), ('keywords', models.
TextField(blank=True, verbose_name='Keywords')), (
'regions_affected', models.TextField(blank=True, verbose_name=
'Regions affected')), ('comments_due_date', models.DateField(blank=
True, null=True, verbose_name='Final date for comments')), (
'notification_type', models.CharField(blank=True, max_length=50,
verbose_name='Notification type')), ('document_link', models.
CharField(blank=True, max_length=1500, verbose_name='Document link'
)), ('external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierRecord', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Active', 'Active'
)], default=None, max_length=10, null=True)), ('title', models.
TextField(blank=True, verbose_name='Title')), ('description',
models.TextField(blank=True, verbose_name='Description')), (
'products_text', models.TextField(blank=True, verbose_name=
'Products affected')), ('sectors_text', models.TextField(blank=True,
verbose_name='Sectors affected')), ('source_id', models.CharField(
blank=True, max_length=20, null=True, verbose_name=
'ID in source system')), ('distribution_date', models.DateField(
blank=True, null=True, verbose_name='Distribution Date')), (
'external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReport', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Draft', 'Draft'),
('Submitted', 'Submitted')], default=None, max_length=10, null=True
)), ('name', models.CharField(blank=True, max_length=200, null=True
)), ('problem_description', models.TextField(blank=True, null=True)
), ('product_text', models.TextField(blank=True, null=True)), (
'product_code', models.CharField(blank=True, max_length=500, null=
True)), ('business_impact_description', models.TextField(blank=True,
null=True)), ('problem_duration_description', models.TextField(
blank=True, null=True)), ('other_companies_affected_choice', models
.CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), (
'DontKnow', "Don't know")], default=None, max_length=10, null=True)
), ('other_countries_affected_description', models.TextField(blank=
True, null=True)), ('steps_taken_to_resolve', models.TextField(
blank=True, null=True)), ('outcome_looking_for', models.TextField(
blank=True, null=True)), ('support_desired_choice', models.
CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE',
'None - this is for your information only'), (
'SUPPORT_DESIRED_LOCAL',
'Local engagement only with UK Government officials in the country I am trying to export to'
), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'),
('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length
=10, null=True)), ('confidentiality_issues_description', models.
TextField(blank=True, null=True)), ('happy_to_publish_choice',
models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES',
'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE',
'Maybe, following consultation with me')], default=None, max_length
=10, null=True)), ('any_other_details_description', models.
TextField(blank=True, null=True)), ('country', models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='barriers.BarrierCountry'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReporter', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'name', models.CharField(blank=True, max_length=1500, verbose_name=
'Reporter name')), ('company', models.CharField(blank=True,
max_length=1500, verbose_name='Company name'))], options={
'abstract': False}), migrations.CreateModel(name='BarrierSource',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100)), (
'description', models.CharField(blank=True, max_length=500, null=
True)), ('short_name', models.CharField(blank=True, max_length=20,
null=True)), ('remote_url', models.URLField(blank=True, max_length=
20, null=True))], options={'abstract': False}), migrations.
CreateModel(name='BarrierTypeMapping', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'destination_barrier_list', models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='destination_barrier_list',
to='barriers.BarrierSource'))], options={'abstract': False}),
migrations.RemoveField(model_name='marketaccessbarrier', name=
'barrier_types'), migrations.RenameField(model_name='barriertype',
old_name='ec_barrier_code', new_name='barrier_code'), migrations.
AlterField(model_name='barriertype', name='name', field=models.
CharField(max_length=200)), migrations.DeleteModel(name=
'MarketAccessBarrier'), migrations.AddField(model_name=
'barriertypemapping', name='destination_barrier_type', field=models
.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='destination_barrier_type', to='barriers.BarrierType')
), migrations.AddField(model_name='barriertypemapping', name=
'source_barrier_list', field=models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='source_barrier_list', to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriertypemapping', name='source_barrier_type', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='source_barrier_type', to='barriers.BarrierType')),
migrations.AddField(model_name='barrierreport', name='reporter',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.
models.deletion.CASCADE, to='barriers.BarrierReporter')),
migrations.AddField(model_name='barrierreport', name=
'top_level_barrier_type', field=models.ForeignKey(blank=True, null=
True, on_delete=django.db.models.deletion.CASCADE, related_name=
'barrier_reports', to='barriers.BarrierType')), migrations.AddField
(model_name='barrierrecord', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barrierrecord', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name='types',
to='barriers.BarrierType')), migrations.AddField(model_name=
'barrierrecord', name='country', field=models.ForeignKey(blank=True,
null=True, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriernotification', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriernotification', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name=
'barrier_types', to='barriers.BarrierType')), migrations.AddField(
model_name='barriernotification', name='country', field=models.
ForeignKey(blank=True, null=True, on_delete=django.db.models.
deletion.CASCADE, related_name='notification_countries', to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriertype', name='barrier_source', field=models.ForeignKey(
default=1, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-02 14:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('barriers', '0011_auto_20170904_1658'),
]
operations = [
migrations.CreateModel(
name='BarrierCountry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=100, verbose_name='Country or Territory Name')),
('code', models.CharField(blank=True, max_length=100, null=True, verbose_name='Country or Territory Code')),
('official_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='Offical Country or Territory name')),
('govuk_index_entry_code', models.CharField(blank=True, max_length=10, null=True, verbose_name='GOV.UK index code')),
('country_or_territory', models.CharField(choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO', max_length=2, verbose_name='Country or Territory flag')),
],
options={
'verbose_name_plural': 'countries or territories',
},
),
migrations.CreateModel(
name='BarrierNotification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('title', models.TextField(blank=True, verbose_name='Title')),
('description', models.TextField(blank=True, verbose_name='Description')),
('distribution_date', models.DateField(blank=True, null=True, verbose_name='Distribution Date')),
('barrier_symbol', models.CharField(blank=True, max_length=500, verbose_name='Barrier Symbol')),
('core_symbol', models.CharField(blank=True, max_length=500, verbose_name='Core Symbol')),
('mab_type', models.CharField(blank=True, max_length=500, verbose_name='Barrier type')),
('products_text', models.TextField(blank=True, verbose_name='Products')),
('product_codes', models.TextField(blank=True, verbose_name='Product codes')),
('objectives', models.TextField(blank=True, verbose_name='Objectives')),
('keywords', models.TextField(blank=True, verbose_name='Keywords')),
('regions_affected', models.TextField(blank=True, verbose_name='Regions affected')),
('comments_due_date', models.DateField(blank=True, null=True, verbose_name='Final date for comments')),
('notification_type', models.CharField(blank=True, max_length=50, verbose_name='Notification type')),
('document_link', models.CharField(blank=True, max_length=1500, verbose_name='Document link')),
('external_link', models.CharField(blank=True, max_length=1500, verbose_name='External site link')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('status', models.CharField(blank=True, choices=[('Active', 'Active')], default=None, max_length=10, null=True)),
('title', models.TextField(blank=True, verbose_name='Title')),
('description', models.TextField(blank=True, verbose_name='Description')),
('products_text', models.TextField(blank=True, verbose_name='Products affected')),
('sectors_text', models.TextField(blank=True, verbose_name='Sectors affected')),
('source_id', models.CharField(blank=True, max_length=20, null=True, verbose_name='ID in source system')),
('distribution_date', models.DateField(blank=True, null=True, verbose_name='Distribution Date')),
('external_link', models.CharField(blank=True, max_length=1500, verbose_name='External site link')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierReport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('status', models.CharField(blank=True, choices=[('Draft', 'Draft'), ('Submitted', 'Submitted')], default=None, max_length=10, null=True)),
('name', models.CharField(blank=True, max_length=200, null=True)),
('problem_description', models.TextField(blank=True, null=True)),
('product_text', models.TextField(blank=True, null=True)),
('product_code', models.CharField(blank=True, max_length=500, null=True)),
('business_impact_description', models.TextField(blank=True, null=True)),
('problem_duration_description', models.TextField(blank=True, null=True)),
('other_companies_affected_choice', models.CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), ('DontKnow', "Don't know")], default=None, max_length=10, null=True)),
('other_countries_affected_description', models.TextField(blank=True, null=True)),
('steps_taken_to_resolve', models.TextField(blank=True, null=True)),
('outcome_looking_for', models.TextField(blank=True, null=True)),
('support_desired_choice', models.CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE', 'None - this is for your information only'), ('SUPPORT_DESIRED_LOCAL', 'Local engagement only with UK Government officials in the country I am trying to export to'), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'), ('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length=10, null=True)),
('confidentiality_issues_description', models.TextField(blank=True, null=True)),
('happy_to_publish_choice', models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES', 'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE', 'Maybe, following consultation with me')], default=None, max_length=10, null=True)),
('any_other_details_description', models.TextField(blank=True, null=True)),
('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierCountry')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierReporter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(blank=True, max_length=1500, verbose_name='Reporter name')),
('company', models.CharField(blank=True, max_length=1500, verbose_name='Company name')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=100)),
('description', models.CharField(blank=True, max_length=500, null=True)),
('short_name', models.CharField(blank=True, max_length=20, null=True)),
('remote_url', models.URLField(blank=True, max_length=20, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierTypeMapping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('destination_barrier_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination_barrier_list', to='barriers.BarrierSource')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='marketaccessbarrier',
name='barrier_types',
),
migrations.RenameField(
model_name='barriertype',
old_name='ec_barrier_code',
new_name='barrier_code',
),
migrations.AlterField(
model_name='barriertype',
name='name',
field=models.CharField(max_length=200),
),
migrations.DeleteModel(
name='MarketAccessBarrier',
),
migrations.AddField(
model_name='barriertypemapping',
name='destination_barrier_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination_barrier_type', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barriertypemapping',
name='source_barrier_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source_barrier_list', to='barriers.BarrierSource'),
),
migrations.AddField(
model_name='barriertypemapping',
name='source_barrier_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source_barrier_type', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barrierreport',
name='reporter',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierReporter'),
),
migrations.AddField(
model_name='barrierreport',
name='top_level_barrier_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='barrier_reports', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barrierrecord',
name='barrier_source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),
),
migrations.AddField(
model_name='barrierrecord',
name='barrier_types',
field=mptt.fields.TreeManyToManyField(blank=True, db_index=True, related_name='types', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barrierrecord',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierCountry'),
),
migrations.AddField(
model_name='barriernotification',
name='barrier_source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),
),
migrations.AddField(
model_name='barriernotification',
name='barrier_types',
field=mptt.fields.TreeManyToManyField(blank=True, db_index=True, related_name='barrier_types', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barriernotification',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notification_countries', to='barriers.BarrierCountry'),
),
migrations.AddField(
model_name='barriertype',
name='barrier_source',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),
),
]
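The schema migration above only creates tables, columns, and relations; a minimal sketch of a follow-up data migration that seeds the new BarrierSource table is shown below. The predecessor file name in dependencies and the source names are hypothetical, while RunPython, apps.get_model, and get_or_create are standard Django APIs.

# Hypothetical follow-up file, e.g. 0013_seed_barrier_sources.py
from django.db import migrations


def seed_barrier_sources(apps, schema_editor):
    # Use the historical model state rather than importing models directly,
    # so this function keeps working as the schema evolves.
    BarrierSource = apps.get_model('barriers', 'BarrierSource')
    for name, short_name in [('WTO notifications', 'WTO'),
                             ('EC Market Access Database', 'MADB')]:
        BarrierSource.objects.get_or_create(
            name=name, defaults={'short_name': short_name})


class Migration(migrations.Migration):

    dependencies = [
        ('barriers', '0012_auto_20171002_1441'),  # hypothetical predecessor
    ]

    operations = [
        migrations.RunPython(seed_barrier_sources, migrations.RunPython.noop),
    ]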
|
flexible
|
{
"blob_id": "645f8f1ebd3bfa0ba32d5be8058b07e2a30ba9b5",
"index": 1314,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('barriers', '0011_auto_20170904_1658')]\n operations = [migrations.CreateModel(name='BarrierCountry', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_date', models.\n DateTimeField(auto_now_add=True)), ('updated_date', models.\n DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(\n default=False)), ('name', models.CharField(max_length=100,\n verbose_name='Country or Territory Name')), ('code', models.\n CharField(blank=True, max_length=100, null=True, verbose_name=\n 'Country or Territory Code')), ('official_name', models.CharField(\n blank=True, max_length=100, null=True, verbose_name=\n 'Offical Country or Territory name')), ('govuk_index_entry_code',\n models.CharField(blank=True, max_length=10, null=True, verbose_name\n ='GOV.UK index code')), ('country_or_territory', models.CharField(\n choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO',\n max_length=2, verbose_name='Country or Territory flag'))], options=\n {'verbose_name_plural': 'countries or territories'}), migrations.\n CreateModel(name='BarrierNotification', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'title', models.TextField(blank=True, verbose_name='Title')), (\n 'description', models.TextField(blank=True, verbose_name=\n 'Description')), ('distribution_date', models.DateField(blank=True,\n null=True, verbose_name='Distribution Date')), ('barrier_symbol',\n models.CharField(blank=True, max_length=500, verbose_name=\n 'Barrier Symbol')), ('core_symbol', models.CharField(blank=True,\n max_length=500, verbose_name='Core Symbol')), ('mab_type', models.\n CharField(blank=True, max_length=500, verbose_name='Barrier type')),\n ('products_text', models.TextField(blank=True, verbose_name=\n 'Products')), ('product_codes', models.TextField(blank=True,\n verbose_name='Product codes')), ('objectives', models.TextField(\n blank=True, verbose_name='Objectives')), ('keywords', models.\n TextField(blank=True, verbose_name='Keywords')), (\n 'regions_affected', models.TextField(blank=True, verbose_name=\n 'Regions affected')), ('comments_due_date', models.DateField(blank=\n True, null=True, verbose_name='Final date for comments')), (\n 'notification_type', models.CharField(blank=True, max_length=50,\n verbose_name='Notification type')), ('document_link', models.\n CharField(blank=True, max_length=1500, verbose_name='Document link'\n )), ('external_link', models.CharField(blank=True, max_length=1500,\n verbose_name='External site link'))], options={'abstract': False}),\n migrations.CreateModel(name='BarrierRecord', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'status', models.CharField(blank=True, choices=[('Active', 'Active'\n )], default=None, max_length=10, null=True)), ('title', models.\n TextField(blank=True, verbose_name='Title')), ('description',\n models.TextField(blank=True, verbose_name='Description')), (\n 'products_text', models.TextField(blank=True, verbose_name=\n 
'Products affected')), ('sectors_text', models.TextField(blank=True,\n verbose_name='Sectors affected')), ('source_id', models.CharField(\n blank=True, max_length=20, null=True, verbose_name=\n 'ID in source system')), ('distribution_date', models.DateField(\n blank=True, null=True, verbose_name='Distribution Date')), (\n 'external_link', models.CharField(blank=True, max_length=1500,\n verbose_name='External site link'))], options={'abstract': False}),\n migrations.CreateModel(name='BarrierReport', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'status', models.CharField(blank=True, choices=[('Draft', 'Draft'),\n ('Submitted', 'Submitted')], default=None, max_length=10, null=True\n )), ('name', models.CharField(blank=True, max_length=200, null=True\n )), ('problem_description', models.TextField(blank=True, null=True)\n ), ('product_text', models.TextField(blank=True, null=True)), (\n 'product_code', models.CharField(blank=True, max_length=500, null=\n True)), ('business_impact_description', models.TextField(blank=True,\n null=True)), ('problem_duration_description', models.TextField(\n blank=True, null=True)), ('other_companies_affected_choice', models\n .CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), (\n 'DontKnow', \"Don't know\")], default=None, max_length=10, null=True)\n ), ('other_countries_affected_description', models.TextField(blank=\n True, null=True)), ('steps_taken_to_resolve', models.TextField(\n blank=True, null=True)), ('outcome_looking_for', models.TextField(\n blank=True, null=True)), ('support_desired_choice', models.\n CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE',\n 'None - this is for your information only'), (\n 'SUPPORT_DESIRED_LOCAL',\n 'Local engagement only with UK Government officials in the country I am trying to export to'\n ), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'),\n ('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length\n =10, null=True)), ('confidentiality_issues_description', models.\n TextField(blank=True, null=True)), ('happy_to_publish_choice',\n models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES',\n 'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE',\n 'Maybe, following consultation with me')], default=None, max_length\n =10, null=True)), ('any_other_details_description', models.\n TextField(blank=True, null=True)), ('country', models.ForeignKey(\n blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,\n to='barriers.BarrierCountry'))], options={'abstract': False}),\n migrations.CreateModel(name='BarrierReporter', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'name', models.CharField(blank=True, max_length=1500, verbose_name=\n 'Reporter name')), ('company', models.CharField(blank=True,\n max_length=1500, verbose_name='Company name'))], options={\n 'abstract': False}), migrations.CreateModel(name='BarrierSource',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_date', models.\n DateTimeField(auto_now_add=True)), 
('updated_date', models.\n DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(\n default=False)), ('name', models.CharField(max_length=100)), (\n 'description', models.CharField(blank=True, max_length=500, null=\n True)), ('short_name', models.CharField(blank=True, max_length=20,\n null=True)), ('remote_url', models.URLField(blank=True, max_length=\n 20, null=True))], options={'abstract': False}), migrations.\n CreateModel(name='BarrierTypeMapping', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'destination_barrier_list', models.ForeignKey(on_delete=django.db.\n models.deletion.CASCADE, related_name='destination_barrier_list',\n to='barriers.BarrierSource'))], options={'abstract': False}),\n migrations.RemoveField(model_name='marketaccessbarrier', name=\n 'barrier_types'), migrations.RenameField(model_name='barriertype',\n old_name='ec_barrier_code', new_name='barrier_code'), migrations.\n AlterField(model_name='barriertype', name='name', field=models.\n CharField(max_length=200)), migrations.DeleteModel(name=\n 'MarketAccessBarrier'), migrations.AddField(model_name=\n 'barriertypemapping', name='destination_barrier_type', field=models\n .ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='destination_barrier_type', to='barriers.BarrierType')\n ), migrations.AddField(model_name='barriertypemapping', name=\n 'source_barrier_list', field=models.ForeignKey(on_delete=django.db.\n models.deletion.CASCADE, related_name='source_barrier_list', to=\n 'barriers.BarrierSource')), migrations.AddField(model_name=\n 'barriertypemapping', name='source_barrier_type', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='source_barrier_type', to='barriers.BarrierType')),\n migrations.AddField(model_name='barrierreport', name='reporter',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, to='barriers.BarrierReporter')),\n migrations.AddField(model_name='barrierreport', name=\n 'top_level_barrier_type', field=models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'barrier_reports', to='barriers.BarrierType')), migrations.AddField\n (model_name='barrierrecord', name='barrier_source', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierSource')), migrations.AddField(model_name=\n 'barrierrecord', name='barrier_types', field=mptt.fields.\n TreeManyToManyField(blank=True, db_index=True, related_name='types',\n to='barriers.BarrierType')), migrations.AddField(model_name=\n 'barrierrecord', name='country', field=models.ForeignKey(blank=True,\n null=True, on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierCountry')), migrations.AddField(model_name=\n 'barriernotification', name='barrier_source', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierSource')), migrations.AddField(model_name=\n 'barriernotification', name='barrier_types', field=mptt.fields.\n TreeManyToManyField(blank=True, db_index=True, related_name=\n 'barrier_types', to='barriers.BarrierType')), migrations.AddField(\n model_name='barriernotification', name='country', field=models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n 
deletion.CASCADE, related_name='notification_countries', to=\n 'barriers.BarrierCountry')), migrations.AddField(model_name=\n 'barriertype', name='barrier_source', field=models.ForeignKey(\n default=1, on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierSource'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n dependencies = [('barriers', '0011_auto_20170904_1658')]\n operations = [migrations.CreateModel(name='BarrierCountry', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_date', models.\n DateTimeField(auto_now_add=True)), ('updated_date', models.\n DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(\n default=False)), ('name', models.CharField(max_length=100,\n verbose_name='Country or Territory Name')), ('code', models.\n CharField(blank=True, max_length=100, null=True, verbose_name=\n 'Country or Territory Code')), ('official_name', models.CharField(\n blank=True, max_length=100, null=True, verbose_name=\n 'Offical Country or Territory name')), ('govuk_index_entry_code',\n models.CharField(blank=True, max_length=10, null=True, verbose_name\n ='GOV.UK index code')), ('country_or_territory', models.CharField(\n choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO',\n max_length=2, verbose_name='Country or Territory flag'))], options=\n {'verbose_name_plural': 'countries or territories'}), migrations.\n CreateModel(name='BarrierNotification', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'title', models.TextField(blank=True, verbose_name='Title')), (\n 'description', models.TextField(blank=True, verbose_name=\n 'Description')), ('distribution_date', models.DateField(blank=True,\n null=True, verbose_name='Distribution Date')), ('barrier_symbol',\n models.CharField(blank=True, max_length=500, verbose_name=\n 'Barrier Symbol')), ('core_symbol', models.CharField(blank=True,\n max_length=500, verbose_name='Core Symbol')), ('mab_type', models.\n CharField(blank=True, max_length=500, verbose_name='Barrier type')),\n ('products_text', models.TextField(blank=True, verbose_name=\n 'Products')), ('product_codes', models.TextField(blank=True,\n verbose_name='Product codes')), ('objectives', models.TextField(\n blank=True, verbose_name='Objectives')), ('keywords', models.\n TextField(blank=True, verbose_name='Keywords')), (\n 'regions_affected', models.TextField(blank=True, verbose_name=\n 'Regions affected')), ('comments_due_date', models.DateField(blank=\n True, null=True, verbose_name='Final date for comments')), (\n 'notification_type', models.CharField(blank=True, max_length=50,\n verbose_name='Notification type')), ('document_link', models.\n CharField(blank=True, max_length=1500, verbose_name='Document link'\n )), ('external_link', models.CharField(blank=True, max_length=1500,\n verbose_name='External site link'))], options={'abstract': False}),\n migrations.CreateModel(name='BarrierRecord', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'status', models.CharField(blank=True, choices=[('Active', 'Active'\n )], default=None, max_length=10, null=True)), ('title', models.\n TextField(blank=True, verbose_name='Title')), ('description',\n 
models.TextField(blank=True, verbose_name='Description')), (\n 'products_text', models.TextField(blank=True, verbose_name=\n 'Products affected')), ('sectors_text', models.TextField(blank=True,\n verbose_name='Sectors affected')), ('source_id', models.CharField(\n blank=True, max_length=20, null=True, verbose_name=\n 'ID in source system')), ('distribution_date', models.DateField(\n blank=True, null=True, verbose_name='Distribution Date')), (\n 'external_link', models.CharField(blank=True, max_length=1500,\n verbose_name='External site link'))], options={'abstract': False}),\n migrations.CreateModel(name='BarrierReport', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'status', models.CharField(blank=True, choices=[('Draft', 'Draft'),\n ('Submitted', 'Submitted')], default=None, max_length=10, null=True\n )), ('name', models.CharField(blank=True, max_length=200, null=True\n )), ('problem_description', models.TextField(blank=True, null=True)\n ), ('product_text', models.TextField(blank=True, null=True)), (\n 'product_code', models.CharField(blank=True, max_length=500, null=\n True)), ('business_impact_description', models.TextField(blank=True,\n null=True)), ('problem_duration_description', models.TextField(\n blank=True, null=True)), ('other_companies_affected_choice', models\n .CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), (\n 'DontKnow', \"Don't know\")], default=None, max_length=10, null=True)\n ), ('other_countries_affected_description', models.TextField(blank=\n True, null=True)), ('steps_taken_to_resolve', models.TextField(\n blank=True, null=True)), ('outcome_looking_for', models.TextField(\n blank=True, null=True)), ('support_desired_choice', models.\n CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE',\n 'None - this is for your information only'), (\n 'SUPPORT_DESIRED_LOCAL',\n 'Local engagement only with UK Government officials in the country I am trying to export to'\n ), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'),\n ('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length\n =10, null=True)), ('confidentiality_issues_description', models.\n TextField(blank=True, null=True)), ('happy_to_publish_choice',\n models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES',\n 'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE',\n 'Maybe, following consultation with me')], default=None, max_length\n =10, null=True)), ('any_other_details_description', models.\n TextField(blank=True, null=True)), ('country', models.ForeignKey(\n blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,\n to='barriers.BarrierCountry'))], options={'abstract': False}),\n migrations.CreateModel(name='BarrierReporter', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'name', models.CharField(blank=True, max_length=1500, verbose_name=\n 'Reporter name')), ('company', models.CharField(blank=True,\n max_length=1500, verbose_name='Company name'))], options={\n 'abstract': False}), migrations.CreateModel(name='BarrierSource',\n fields=[('id', 
models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created_date', models.\n DateTimeField(auto_now_add=True)), ('updated_date', models.\n DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(\n default=False)), ('name', models.CharField(max_length=100)), (\n 'description', models.CharField(blank=True, max_length=500, null=\n True)), ('short_name', models.CharField(blank=True, max_length=20,\n null=True)), ('remote_url', models.URLField(blank=True, max_length=\n 20, null=True))], options={'abstract': False}), migrations.\n CreateModel(name='BarrierTypeMapping', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('created_date', models.DateTimeField(\n auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now\n =True)), ('is_deleted', models.BooleanField(default=False)), (\n 'destination_barrier_list', models.ForeignKey(on_delete=django.db.\n models.deletion.CASCADE, related_name='destination_barrier_list',\n to='barriers.BarrierSource'))], options={'abstract': False}),\n migrations.RemoveField(model_name='marketaccessbarrier', name=\n 'barrier_types'), migrations.RenameField(model_name='barriertype',\n old_name='ec_barrier_code', new_name='barrier_code'), migrations.\n AlterField(model_name='barriertype', name='name', field=models.\n CharField(max_length=200)), migrations.DeleteModel(name=\n 'MarketAccessBarrier'), migrations.AddField(model_name=\n 'barriertypemapping', name='destination_barrier_type', field=models\n .ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='destination_barrier_type', to='barriers.BarrierType')\n ), migrations.AddField(model_name='barriertypemapping', name=\n 'source_barrier_list', field=models.ForeignKey(on_delete=django.db.\n models.deletion.CASCADE, related_name='source_barrier_list', to=\n 'barriers.BarrierSource')), migrations.AddField(model_name=\n 'barriertypemapping', name='source_barrier_type', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='source_barrier_type', to='barriers.BarrierType')),\n migrations.AddField(model_name='barrierreport', name='reporter',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, to='barriers.BarrierReporter')),\n migrations.AddField(model_name='barrierreport', name=\n 'top_level_barrier_type', field=models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'barrier_reports', to='barriers.BarrierType')), migrations.AddField\n (model_name='barrierrecord', name='barrier_source', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierSource')), migrations.AddField(model_name=\n 'barrierrecord', name='barrier_types', field=mptt.fields.\n TreeManyToManyField(blank=True, db_index=True, related_name='types',\n to='barriers.BarrierType')), migrations.AddField(model_name=\n 'barrierrecord', name='country', field=models.ForeignKey(blank=True,\n null=True, on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierCountry')), migrations.AddField(model_name=\n 'barriernotification', name='barrier_source', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierSource')), migrations.AddField(model_name=\n 'barriernotification', name='barrier_types', field=mptt.fields.\n TreeManyToManyField(blank=True, db_index=True, related_name=\n 'barrier_types', to='barriers.BarrierType')), 
migrations.AddField(\n model_name='barriernotification', name='country', field=models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='notification_countries', to=\n 'barriers.BarrierCountry')), migrations.AddField(model_name=\n 'barriertype', name='barrier_source', field=models.ForeignKey(\n default=1, on_delete=django.db.models.deletion.CASCADE, to=\n 'barriers.BarrierSource'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.4 on 2017-10-02 14:41\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('barriers', '0011_auto_20170904_1658'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BarrierCountry',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_deleted', models.BooleanField(default=False)),\n ('name', models.CharField(max_length=100, verbose_name='Country or Territory Name')),\n ('code', models.CharField(blank=True, max_length=100, null=True, verbose_name='Country or Territory Code')),\n ('official_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='Offical Country or Territory name')),\n ('govuk_index_entry_code', models.CharField(blank=True, max_length=10, null=True, verbose_name='GOV.UK index code')),\n ('country_or_territory', models.CharField(choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO', max_length=2, verbose_name='Country or Territory flag')),\n ],\n options={\n 'verbose_name_plural': 'countries or territories',\n },\n ),\n migrations.CreateModel(\n name='BarrierNotification',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_deleted', models.BooleanField(default=False)),\n ('title', models.TextField(blank=True, verbose_name='Title')),\n ('description', models.TextField(blank=True, verbose_name='Description')),\n ('distribution_date', models.DateField(blank=True, null=True, verbose_name='Distribution Date')),\n ('barrier_symbol', models.CharField(blank=True, max_length=500, verbose_name='Barrier Symbol')),\n ('core_symbol', models.CharField(blank=True, max_length=500, verbose_name='Core Symbol')),\n ('mab_type', models.CharField(blank=True, max_length=500, verbose_name='Barrier type')),\n ('products_text', models.TextField(blank=True, verbose_name='Products')),\n ('product_codes', models.TextField(blank=True, verbose_name='Product codes')),\n ('objectives', models.TextField(blank=True, verbose_name='Objectives')),\n ('keywords', models.TextField(blank=True, verbose_name='Keywords')),\n ('regions_affected', models.TextField(blank=True, verbose_name='Regions affected')),\n ('comments_due_date', models.DateField(blank=True, null=True, verbose_name='Final date for comments')),\n ('notification_type', models.CharField(blank=True, max_length=50, verbose_name='Notification type')),\n ('document_link', models.CharField(blank=True, max_length=1500, verbose_name='Document link')),\n ('external_link', models.CharField(blank=True, max_length=1500, verbose_name='External site link')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='BarrierRecord',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_deleted', models.BooleanField(default=False)),\n ('status', models.CharField(blank=True, choices=[('Active', 'Active')], default=None, max_length=10, null=True)),\n 
('title', models.TextField(blank=True, verbose_name='Title')),\n ('description', models.TextField(blank=True, verbose_name='Description')),\n ('products_text', models.TextField(blank=True, verbose_name='Products affected')),\n ('sectors_text', models.TextField(blank=True, verbose_name='Sectors affected')),\n ('source_id', models.CharField(blank=True, max_length=20, null=True, verbose_name='ID in source system')),\n ('distribution_date', models.DateField(blank=True, null=True, verbose_name='Distribution Date')),\n ('external_link', models.CharField(blank=True, max_length=1500, verbose_name='External site link')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='BarrierReport',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_deleted', models.BooleanField(default=False)),\n ('status', models.CharField(blank=True, choices=[('Draft', 'Draft'), ('Submitted', 'Submitted')], default=None, max_length=10, null=True)),\n ('name', models.CharField(blank=True, max_length=200, null=True)),\n ('problem_description', models.TextField(blank=True, null=True)),\n ('product_text', models.TextField(blank=True, null=True)),\n ('product_code', models.CharField(blank=True, max_length=500, null=True)),\n ('business_impact_description', models.TextField(blank=True, null=True)),\n ('problem_duration_description', models.TextField(blank=True, null=True)),\n ('other_companies_affected_choice', models.CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), ('DontKnow', \"Don't know\")], default=None, max_length=10, null=True)),\n ('other_countries_affected_description', models.TextField(blank=True, null=True)),\n ('steps_taken_to_resolve', models.TextField(blank=True, null=True)),\n ('outcome_looking_for', models.TextField(blank=True, null=True)),\n ('support_desired_choice', models.CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE', 'None - this is for your information only'), ('SUPPORT_DESIRED_LOCAL', 'Local engagement only with UK Government officials in the country I am trying to export to'), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'), ('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length=10, null=True)),\n ('confidentiality_issues_description', models.TextField(blank=True, null=True)),\n ('happy_to_publish_choice', models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES', 'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE', 'Maybe, following consultation with me')], default=None, max_length=10, null=True)),\n ('any_other_details_description', models.TextField(blank=True, null=True)),\n ('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierCountry')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='BarrierReporter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_deleted', models.BooleanField(default=False)),\n ('name', models.CharField(blank=True, max_length=1500, verbose_name='Reporter name')),\n ('company', models.CharField(blank=True, max_length=1500, verbose_name='Company name')),\n ],\n options={\n 'abstract': False,\n },\n ),\n 
migrations.CreateModel(\n name='BarrierSource',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_deleted', models.BooleanField(default=False)),\n ('name', models.CharField(max_length=100)),\n ('description', models.CharField(blank=True, max_length=500, null=True)),\n ('short_name', models.CharField(blank=True, max_length=20, null=True)),\n ('remote_url', models.URLField(blank=True, max_length=20, null=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='BarrierTypeMapping',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_deleted', models.BooleanField(default=False)),\n ('destination_barrier_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination_barrier_list', to='barriers.BarrierSource')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.RemoveField(\n model_name='marketaccessbarrier',\n name='barrier_types',\n ),\n migrations.RenameField(\n model_name='barriertype',\n old_name='ec_barrier_code',\n new_name='barrier_code',\n ),\n migrations.AlterField(\n model_name='barriertype',\n name='name',\n field=models.CharField(max_length=200),\n ),\n migrations.DeleteModel(\n name='MarketAccessBarrier',\n ),\n migrations.AddField(\n model_name='barriertypemapping',\n name='destination_barrier_type',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination_barrier_type', to='barriers.BarrierType'),\n ),\n migrations.AddField(\n model_name='barriertypemapping',\n name='source_barrier_list',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source_barrier_list', to='barriers.BarrierSource'),\n ),\n migrations.AddField(\n model_name='barriertypemapping',\n name='source_barrier_type',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source_barrier_type', to='barriers.BarrierType'),\n ),\n migrations.AddField(\n model_name='barrierreport',\n name='reporter',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierReporter'),\n ),\n migrations.AddField(\n model_name='barrierreport',\n name='top_level_barrier_type',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='barrier_reports', to='barriers.BarrierType'),\n ),\n migrations.AddField(\n model_name='barrierrecord',\n name='barrier_source',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),\n ),\n migrations.AddField(\n model_name='barrierrecord',\n name='barrier_types',\n field=mptt.fields.TreeManyToManyField(blank=True, db_index=True, related_name='types', to='barriers.BarrierType'),\n ),\n migrations.AddField(\n model_name='barrierrecord',\n name='country',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierCountry'),\n ),\n migrations.AddField(\n model_name='barriernotification',\n name='barrier_source',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),\n ),\n migrations.AddField(\n 
model_name='barriernotification',\n name='barrier_types',\n field=mptt.fields.TreeManyToManyField(blank=True, db_index=True, related_name='barrier_types', to='barriers.BarrierType'),\n ),\n migrations.AddField(\n model_name='barriernotification',\n name='country',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notification_countries', to='barriers.BarrierCountry'),\n ),\n migrations.AddField(\n model_name='barriertype',\n name='barrier_source',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from catalyst_rl.contrib.registry import (
Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES,
Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform,
TRANSFORMS
)
from catalyst_rl.core.registry import Callback, CALLBACKS
from catalyst_rl.utils.tools.registry import Registry
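# CALLBACKS.late_add registers a loader callable that runs later, deferring
# the import of catalyst_rl.dl.callbacks until the registry is actually used
# (a common way to break an import cycle between the dl and core packages).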
def _callbacks_loader(r: Registry):
from catalyst_rl.dl import callbacks as m
r.add_from_module(m)
CALLBACKS.late_add(_callbacks_loader)
__all__ = [
"Callback",
"Criterion",
"Optimizer",
"Scheduler",
"Module",
"Model",
"Sampler",
"Transform",
"CALLBACKS",
"CRITERIONS",
"GRAD_CLIPPERS",
"MODELS",
"MODULES",
"OPTIMIZERS",
"SAMPLERS",
"SCHEDULERS",
"TRANSFORMS",
]
|
normal
|
{
"blob_id": "09d13fe6b090850782feb601412cf135d497136f",
"index": 6206,
"step-1": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',\n 'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',\n 'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',\n 'SCHEDULERS', 'TRANSFORMS']\n",
"step-4": "from catalyst_rl.contrib.registry import Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES, Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform, TRANSFORMS\nfrom catalyst_rl.core.registry import Callback, CALLBACKS\nfrom catalyst_rl.utils.tools.registry import Registry\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',\n 'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',\n 'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',\n 'SCHEDULERS', 'TRANSFORMS']\n",
"step-5": "from catalyst_rl.contrib.registry import (\n Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES,\n Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform,\n TRANSFORMS\n)\nfrom catalyst_rl.core.registry import Callback, CALLBACKS\nfrom catalyst_rl.utils.tools.registry import Registry\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n\n__all__ = [\n \"Callback\",\n \"Criterion\",\n \"Optimizer\",\n \"Scheduler\",\n \"Module\",\n \"Model\",\n \"Sampler\",\n \"Transform\",\n \"CALLBACKS\",\n \"CRITERIONS\",\n \"GRAD_CLIPPERS\",\n \"MODELS\",\n \"MODULES\",\n \"OPTIMIZERS\",\n \"SAMPLERS\",\n \"SCHEDULERS\",\n \"TRANSFORMS\",\n]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
#coding=utf8
from __future__ import (division,absolute_import,print_function,unicode_literals)
import argparse, csv, sys, subprocess, time
NR_THREAD = 20
def shell(cmd):
    subprocess.call(cmd, shell=True)
    print("Done! {0}.".format(cmd))
start = time.time()
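# Pipeline: create the FTRL working directories, run FTRL/FTRLStarter.py on
# the FFM-formatted train/test splits, then map the split predictions back
# with util/map.py.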
cmd = 'mkdir FTRL/tmp -p'
shell(cmd)
cmd = 'mkdir FTRL/data -p'
shell(cmd)
#cmd = 'FTRL/ensamble/ensamble.py -s {nr_thread} -f 5 ffmData/Filter100/click_train.ffm ffmData/Filter100/click_test.ffm FTRL/data/click_train_out.txt FTRL/data/click_test_out.txt '.format(nr_thread=NR_THREAD)
#shell(cmd)
cmd = 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'
shell(cmd)
cmd='util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'
shell(cmd)
print('time used = {0:.0f}'.format(time.time() - start))
|
normal
|
{
"blob_id": "2a0172641c48c47f048bf5e9f1889b29abbb0b7c",
"index": 767,
"step-1": "<mask token>\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\n<mask token>\nshell(cmd)\n<mask token>\nshell(cmd)\n<mask token>\nshell(cmd)\n<mask token>\nshell(cmd)\nprint('time used = {0:.0f}'.format(time.time() - start))\n",
"step-3": "<mask token>\nNR_THREAD = 20\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\nstart = time.time()\ncmd = 'mkdir FTRL/tmp -p'\nshell(cmd)\ncmd = 'mkdir FTRL/data -p'\nshell(cmd)\ncmd = (\n 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'\n )\nshell(cmd)\ncmd = 'util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'\nshell(cmd)\nprint('time used = {0:.0f}'.format(time.time() - start))\n",
"step-4": "from __future__ import division, absolute_import, print_function, unicode_literals\nimport argparse, csv, sys, subprocess, time\nNR_THREAD = 20\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\nstart = time.time()\ncmd = 'mkdir FTRL/tmp -p'\nshell(cmd)\ncmd = 'mkdir FTRL/data -p'\nshell(cmd)\ncmd = (\n 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'\n )\nshell(cmd)\ncmd = 'util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'\nshell(cmd)\nprint('time used = {0:.0f}'.format(time.time() - start))\n",
"step-5": "#!/usr/bin/env python3\n#coding=utf8\nfrom __future__ import (division,absolute_import,print_function,unicode_literals)\nimport argparse, csv, sys,subprocess,time\n\nNR_THREAD=20\ndef shell(cmd):\n subprocess.call(cmd,shell=True)\n print(\"Done! {0}.\".format(cmd))\n\nstart=time.time()\n\ncmd = 'mkdir FTRL/tmp -p'\nshell(cmd)\n\ncmd = 'mkdir FTRL/data -p'\nshell(cmd)\n\n#cmd = 'FTRL/ensamble/ensamble.py -s {nr_thread} -f 5 ffmData/Filter100/click_train.ffm ffmData/Filter100/click_test.ffm FTRL/data/click_train_out.txt FTRL/data/click_test_out.txt '.format(nr_thread=NR_THREAD)\n#shell(cmd)\n\ncmd = 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'\nshell(cmd)\n\ncmd='util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'\nshell(cmd)\n\nprint('time used = {0:.0f}'.format(time.time()-start))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
import random
import string
import names
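# Write n rows of (random 9-character alphanumeric id, random full name)
# to tmp/<filename>.csv.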
def generatetest(n=100, filename="test_data"):
ids = []
names_list = []
for _ in range(n):
ids.append(''.join(random.choices(
string.ascii_letters + string.digits, k=9)))
names_list.append(names.get_full_name())
df = pd.DataFrame({
'id': ids,
'names': names_list,
})
df.to_csv('tmp/{}.csv'.format(filename), index=False)
if __name__ == "__main__":
generatetest()
print("test set generated!")
|
normal
|
{
"blob_id": "aa913fd40a710cfd7288fd59c4039c4b6a5745cc",
"index": 4569,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == '__main__':\n generatetest()\n print('test set generated!')\n",
"step-4": "import pandas as pd\nimport random\nimport string\nimport names\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == '__main__':\n generatetest()\n print('test set generated!')\n",
"step-5": "import pandas as pd\nimport random\nimport string\nimport names\n\n\ndef generatetest(n=100, filename=\"test_data\"):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(\n string.ascii_letters + string.digits, k=9)))\n names_list.append(names.get_full_name())\n\n df = pd.DataFrame({\n 'id': ids,\n 'names': names_list,\n })\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == \"__main__\":\n generatetest()\n print(\"test set generated!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import scrapy
import time
import os.path
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from tempfile import mkstemp
from shutil import move
from os import fdopen, remove
from datetime import datetime
import logging
output_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')
log_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)
class ProductSpider(scrapy.Spider):
name = "tekniknet_new"
allowed_domains = ['www.tekniknet.se']
start_urls = ['https://www.tekniknet.se/#']
def __init__(self):
# self.driver = webdriver.Chrome("./chromedriver.exe")
options = webdriver.ChromeOptions()
# options.add_argument("--headless")
options.add_argument("--start-maximized")
self.driver = webdriver.Chrome(chrome_options=options)
def parse(self, response):
# Quiet down all the unnecessary logging.
fh = logging.FileHandler(log_output_file)
fh.setLevel(logging.INFO)
logging.getLogger('selenium.webdriver.remote.remote_connection').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
logging.getLogger('selenium.webdriver.remote.remote_connection').addHandler(fh)
logging.getLogger('urllib3.connectionpool').addHandler(fh)
logging.getLogger().addHandler(fh)
self.loggger = logging.getLogger()
self.driver.get(response.url)
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list3_categories = []
list4_categories = []
csv_categories1 = ''
csv_heading = ''
csv_stock = ''
csv_price_new = ''
csv_price_old = ''
csv_desc = ''
csv_article_number = ''
# article_number_list = []
csv_image_url = []
# file_exist = False
old_product_url = []
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'email')))
username = self.driver.find_element_by_id('email')
username.send_keys("[email protected]")
        password = self.driver.find_element_by_id('password')
        password.send_keys("order88")
login = self.driver.find_element_by_class_name('button-confirm')
login.click()
time.sleep(5)
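        # Rewrite tekniknet.csv through a temp file, flipping every 'NEW'
        # flag from the previous run to 'old' so this run's rows stand out.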
#Create temp file
fh, abs_path = mkstemp()
with fdopen(fh,'w') as new_file:
with open("tekniknet.csv") as old_file:
for line in old_file:
new_file.write(line.replace('NEW', 'old'))
#Remove original file
remove("tekniknet.csv")
#Move new file
move(abs_path, "tekniknet.csv")
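        # Remember every product URL already in the CSV so previously
        # scraped products are skipped below.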
with open('tekniknet.csv', 'r') as ins:
for line in ins:
                old_product_url.append(line.split(',')[-1].strip())  # strip the trailing newline so URL comparisons match
        file = open("tekniknet.csv", "a", errors='replace')
# file.write('OLD/NEW' + ',' + 'article number' + ',' + 'category1' + ',' + 'category2' + ',' + 'category3' + ',' + 'heading' + ',' + 'description' + ',' + 'current price' + ',' + 'previous price' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'EAN code' + ',' + 'stock' + ',' + 'product url' + '\n')
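        # Collect the top-level category links from the navigation menu.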
for wrapper1 in self.driver.find_elements_by_class_name('level-0'):
child_wrapper1 = wrapper1.find_element_by_xpath('./a')
link1 = child_wrapper1.get_attribute('href')
list1.append(link1)
self.loggger.info('*************************************************')
self.loggger.info(link1)
for i in range(0, len(list1)-4):
self.driver.get(list1[i])
try:
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'inner')))
for wrapper2 in self.driver.find_elements_by_class_name('inner'):
try:
sub2 = wrapper2.find_element_by_class_name('subLinks')
child_wrapper2 = sub2.find_elements_by_xpath('.//a')
for child2 in child_wrapper2:
link2 = child2.get_attribute('href')
list2.append(link2)
self.loggger.info('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
self.loggger.info(link2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
except:
try:
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))
subcategory = self.driver.find_element_by_id('categorySubCategories')
wrapper2_1 = subcategory.find_elements_by_xpath('.//a')
for child3 in wrapper2_1:
link2_1 = child3.get_attribute('href')
list5.append(link2_1)
for n in range(0, len(list5)):
self.driver.get(list5[n])
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))
subcategory = self.driver.find_element_by_id('categorySubCategories')
wrapper2_1_1 = subcategory.find_elements_by_xpath('.//a')
for child3_1 in wrapper2_1_1:
if child3_1.text != 'Visa alla':
link2_1_1 = child3_1.get_attribute('href')
list2.append(link2_1_1)
except:
try:
breadcrumbs2 = self.driver.find_element_by_id('breadcrumbs')
categories2 = breadcrumbs2.find_elements_by_xpath('.//li')
csv_categories2 = ''
for category2 in categories2:
csv_categories2 = csv_categories2 + category2.text + '/'
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))
for wrapper2_2 in self.driver.find_elements_by_class_name('listProduct'):
wrapper2_3 = wrapper2_2.find_element_by_xpath(".//a")
link2_2 = wrapper2_3.get_attribute('href')
list4.append(link2_2)
list4_categories.append(csv_categories2)
self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
self.loggger.info(link2_2)
self.loggger.info('error')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
        # To download only some products, use e.g. range(0, 5);
        # range(0, len(list2)) downloads them all.
for j in range(0, len(list2)):
try:
self.loggger.info('**********-------------- ' + str(j) + ' ******************************')
self.driver.get(list2[j])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')
categories1 = breadcrumbs1.find_elements_by_xpath('.//li')
csv_categories1 = ''
for category1 in categories1:
csv_categories1 = csv_categories1 + category1.text + '/'
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))
for wrapper3 in self.driver.find_elements_by_class_name('listProduct'):
child_wrapper3 = wrapper3.find_element_by_xpath(".//a")
link3 = child_wrapper3.get_attribute('href')
list3.append(link3)
list3_categories.append(csv_categories1)
self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
self.loggger.info(link3)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
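        # Scrape every product page collected via the sub-link categories and
        # append one CSV row per previously unseen URL.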
for k in range(0, len(list3)):
try:
if list3[k] not in old_product_url:
self.loggger.info('----------------------- ' + str(k) + ' ******************************')
self.driver.get(list3[k])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
# breadcrumbs = self.driver.find_element_by_id('breadcrumbs')
# categories = breadcrumbs.find_elements_by_xpath('.//a')
# for category in categories:
# csv_categories.append(category.text)
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading3 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
                    except:
                        csv_stock = 'Out of stock'
                        self.loggger.info('stock3 non-exist')
try:
price_new = offer.find_element_by_class_name('priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name('priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name('priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price3 non-exist')
csv_price_old = ''
csv_price_new = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\n', ' ').replace('\r', '').rstrip().lstrip()
except:
self.loggger.info('description3 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id('pManufacturer')
csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')
except:
self.loggger.info('article number3 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image3 non-exist')
######################################### CSV File Writing #########################################
# if csv_article_number not in article_number_list:
                    if csv_image_url:
                        # Pad the image URLs out to the 18 image-link columns;
                        # a blank EAN column, stock and product url follow.
                        images = (csv_image_url + [' '] * 18)[:18]
                        row = ['NEW', csv_article_number,
                               list3_categories[k].split('/')[1],
                               list3_categories[k].split('/')[2],
                               list3_categories[k].split('/')[3],
                               csv_heading, csv_desc,
                               csv_price_new, csv_price_old]
                        row += images + [' ', csv_stock, list3[k]]
                        file.write(','.join(row) + '\n')
# article_number_list.append(csv_article_number)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error3')
        # To download only some products, use e.g. range(0, 20);
        # range(0, len(list4)) downloads them all.
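        # Same scrape for the product pages collected directly from category
        # listings (list4).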
for m in range(0, len(list4)):
try:
if list4[m] not in old_product_url:
self.loggger.info('********************** ' + str(k) + ' ******************************')
self.driver.get(list4[m])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
# breadcrumbs = self.driver.find_element_by_id('breadcrumbs')
# categories = breadcrumbs.find_elements_by_xpath('.//a')
# for category in categories:
# csv_categories.append(category.text)
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading4 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock4 non-exist')
try:
price_new = offer.find_element_by_class_name('priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name('priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name('priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price4 non-exist')
csv_price_new = ''
csv_price_old = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\n', ' ').replace('\r', '').rstrip().lstrip()
except:
self.loggger.info('description4 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id('pManufacturer')
csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')
except:
self.loggger.info('article number4 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image4 non-exist')
######################################### CSV File Writing #########################################
# if csv_article_number not in article_number_list:
                    if csv_image_url:
                        # Same 30-column row as above: pad the image URLs to
                        # 18 columns, blank EAN, then stock and product url.
                        images = (csv_image_url + [' '] * 18)[:18]
                        row = ['NEW', csv_article_number,
                               list4_categories[m].split('/')[1],
                               list4_categories[m].split('/')[2],
                               list4_categories[m].split('/')[3],
                               csv_heading, csv_desc,
                               csv_price_new, csv_price_old]
                        row += images + [' ', csv_stock, list4[m]]
                        file.write(','.join(row) + '\n')
# article_number_list.append(csv_article_number)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error4')
file.close()
self.driver.close()
|
normal
|
{
"blob_id": "237a93ff73cb98fd9d4006f14d3cadbdc09259a4",
"index": 9885,
"step-1": "<mask token>\n\n\nclass ProductSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ProductSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n csv_image_url = []\n old_product_url = []\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located\n ((By.ID, 'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys('[email protected]')\n username = self.driver.find_element_by_id('password')\n username.send_keys('order88')\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open('tekniknet.csv') as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n remove('tekniknet.csv')\n move(abs_path, 'tekniknet.csv')\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n file = open('tekniknet.csv', 'a', errors='replace')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 = child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info(\n '*************************************************')\n self.loggger.info(link1)\n for i in range(0, len(list1) - 4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'\n ):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info(\n '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'\n )\n self.loggger.info(link2)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n for child3 in wrapper2_1:\n link2_1 = child3.get_attribute('href')\n list5.append(link2_1)\n for n in range(0, len(list5)):\n self.driver.get(list5[n])\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1_1 = subcategory.find_elements_by_xpath(\n './/a')\n for child3_1 in wrapper2_1_1:\n if child3_1.text != 'Visa alla':\n link2_1_1 = 
child3_1.get_attribute('href')\n list2.append(link2_1_1)\n except:\n try:\n breadcrumbs2 = self.driver.find_element_by_id(\n 'breadcrumbs')\n categories2 = breadcrumbs2.find_elements_by_xpath(\n './/li')\n csv_categories2 = ''\n for category2 in categories2:\n csv_categories2 = (csv_categories2 + category2.\n text + '/')\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME,\n 'listProduct')))\n for wrapper2_2 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n wrapper2_3 = wrapper2_2.find_element_by_xpath(\n './/a')\n link2_2 = wrapper2_3.get_attribute('href')\n list4.append(link2_2)\n list4_categories.append(csv_categories2)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link2_2)\n self.loggger.info('error')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for j in range(0, len(list2)):\n try:\n self.loggger.info('**********-------------- ' + str(j) +\n ' ******************************')\n self.driver.get(list2[j])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n csv_categories1 = ''\n for category1 in categories1:\n csv_categories1 = csv_categories1 + category1.text + '/'\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'listProduct'))\n )\n for wrapper3 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n child_wrapper3 = wrapper3.find_element_by_xpath('.//a')\n link3 = child_wrapper3.get_attribute('href')\n list3.append(link3)\n list3_categories.append(csv_categories1)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link3)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for k in range(0, len(list3)):\n try:\n if list3[k] not in old_product_url:\n self.loggger.info('----------------------- ' + str\n (k) + ' ******************************')\n self.driver.get(list3[k])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading3 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock3 non-exist')\n csv_stock = ''\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = offer.find_element_by_class_name(\n 'priceOld')\n csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price3 non-exist')\n csv_price_old = ''\n csv_price_new = ''\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',',\n '-').replace('\\n', ' ').replace('\\r', '').rstrip(\n ).lstrip()\n except:\n self.loggger.info('description3 non-exist')\n csv_desc = ''\n try:\n article_number = offer.find_element_by_id(\n 'pManufacturer')\n csv_article_number = article_number.text.split(' ')[-1\n ].replace(',', '.')\n except:\n self.loggger.info('article 
number3 non-exist')\n csv_article_number = ''\n try:\n pimages = offer.find_elements_by_xpath('.//img')\n csv_image_url = []\n for pimage in pimages:\n image_url = pimage.get_attribute('src')\n if image_url not in csv_image_url:\n csv_image_url.append(image_url)\n except:\n self.loggger.info('image3 non-exist')\n if len(csv_image_url) == 1:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 2:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 3:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 4:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 5:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 6:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n 
list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 7:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 8:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 9:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 10:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 11:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + 
',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 12:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 13:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 14:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 15:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif 
len(csv_image_url) == 16:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 17:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' +\n list3[k] + '\\n')\n elif len(csv_image_url) >= 18:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + csv_image_url[17] + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error3')\n for m in range(0, len(list4)):\n try:\n if list4[m] not in old_product_url:\n self.loggger.info('********************** ' + str(\n k) + ' ******************************')\n self.driver.get(list4[m])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading4 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock4 non-exist')\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = offer.find_element_by_class_name(\n 'priceOld')\n 
csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price4 non-exist')\n csv_price_new = ''\n csv_price_old = ''\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',',\n '-').replace('\\n', ' ').replace('\\r', '').rstrip(\n ).lstrip()\n except:\n self.loggger.info('description4 non-exist')\n csv_desc = ''\n try:\n article_number = offer.find_element_by_id(\n 'pManufacturer')\n csv_article_number = article_number.text.split(' ')[-1\n ].replace(',', '.')\n except:\n self.loggger.info('article number4 non-exist')\n csv_article_number = ''\n try:\n pimages = offer.find_elements_by_xpath('.//img')\n csv_image_url = []\n for pimage in pimages:\n image_url = pimage.get_attribute('src')\n if image_url not in csv_image_url:\n csv_image_url.append(image_url)\n except:\n self.loggger.info('image4 non-exist')\n if len(csv_image_url) == 1:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 2:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 3:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif len(csv_image_url) == 4:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 5:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n 
csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 6:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 7:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif len(csv_image_url) == 8:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 9:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 10:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' 
+\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 11:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif len(csv_image_url) == 12:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 13:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 14:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif len(csv_image_url) == 15:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + 
',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 16:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 17:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' +\n list4[m] + '\\n')\n elif len(csv_image_url) >= 18:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + csv_image_url[17] + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error4')\n file.close()\n self.driver.close()\n",
"step-3": "<mask token>\noutput_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')\nlog_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)\n\n\nclass ProductSpider(scrapy.Spider):\n name = 'tekniknet_new'\n allowed_domains = ['www.tekniknet.se']\n start_urls = ['https://www.tekniknet.se/#']\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n csv_image_url = []\n old_product_url = []\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located\n ((By.ID, 'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys('[email protected]')\n username = self.driver.find_element_by_id('password')\n username.send_keys('order88')\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open('tekniknet.csv') as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n remove('tekniknet.csv')\n move(abs_path, 'tekniknet.csv')\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n file = open('tekniknet.csv', 'a', errors='replace')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 = child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info(\n '*************************************************')\n self.loggger.info(link1)\n for i in range(0, len(list1) - 4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'\n ):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info(\n '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'\n )\n self.loggger.info(link2)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n for child3 in wrapper2_1:\n link2_1 = child3.get_attribute('href')\n list5.append(link2_1)\n for n in range(0, len(list5)):\n self.driver.get(list5[n])\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = 
self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1_1 = subcategory.find_elements_by_xpath(\n './/a')\n for child3_1 in wrapper2_1_1:\n if child3_1.text != 'Visa alla':\n link2_1_1 = child3_1.get_attribute('href')\n list2.append(link2_1_1)\n except:\n try:\n breadcrumbs2 = self.driver.find_element_by_id(\n 'breadcrumbs')\n categories2 = breadcrumbs2.find_elements_by_xpath(\n './/li')\n csv_categories2 = ''\n for category2 in categories2:\n csv_categories2 = (csv_categories2 + category2.\n text + '/')\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME,\n 'listProduct')))\n for wrapper2_2 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n wrapper2_3 = wrapper2_2.find_element_by_xpath(\n './/a')\n link2_2 = wrapper2_3.get_attribute('href')\n list4.append(link2_2)\n list4_categories.append(csv_categories2)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link2_2)\n self.loggger.info('error')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for j in range(0, len(list2)):\n try:\n self.loggger.info('**********-------------- ' + str(j) +\n ' ******************************')\n self.driver.get(list2[j])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n csv_categories1 = ''\n for category1 in categories1:\n csv_categories1 = csv_categories1 + category1.text + '/'\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'listProduct'))\n )\n for wrapper3 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n child_wrapper3 = wrapper3.find_element_by_xpath('.//a')\n link3 = child_wrapper3.get_attribute('href')\n list3.append(link3)\n list3_categories.append(csv_categories1)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link3)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for k in range(0, len(list3)):\n try:\n if list3[k] not in old_product_url:\n self.loggger.info('----------------------- ' + str\n (k) + ' ******************************')\n self.driver.get(list3[k])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading3 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock3 non-exist')\n csv_stock = ''\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = offer.find_element_by_class_name(\n 'priceOld')\n csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price3 non-exist')\n csv_price_old = ''\n csv_price_new = ''\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',',\n '-').replace('\\n', ' ').replace('\\r', '').rstrip(\n ).lstrip()\n except:\n self.loggger.info('description3 non-exist')\n 
csv_desc = ''\n try:\n article_number = offer.find_element_by_id(\n 'pManufacturer')\n csv_article_number = article_number.text.split(' ')[-1\n ].replace(',', '.')\n except:\n self.loggger.info('article number3 non-exist')\n csv_article_number = ''\n try:\n pimages = offer.find_elements_by_xpath('.//img')\n csv_image_url = []\n for pimage in pimages:\n image_url = pimage.get_attribute('src')\n if image_url not in csv_image_url:\n csv_image_url.append(image_url)\n except:\n self.loggger.info('image3 non-exist')\n if len(csv_image_url) == 1:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 2:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 3:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 4:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 5:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + 
',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 6:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 7:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 8:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 9:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 10:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 11:\n file.write('NEW' + ',' + 
csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 12:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 13:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 14:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 15:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + 
csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 16:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 17:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' +\n list3[k] + '\\n')\n elif len(csv_image_url) >= 18:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + csv_image_url[17] + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error3')\n for m in range(0, len(list4)):\n try:\n if list4[m] not in old_product_url:\n self.loggger.info('********************** ' + str(\n k) + ' ******************************')\n self.driver.get(list4[m])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading4 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock4 non-exist')\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' 
')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = offer.find_element_by_class_name(\n 'priceOld')\n csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price4 non-exist')\n csv_price_new = ''\n csv_price_old = ''\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',',\n '-').replace('\\n', ' ').replace('\\r', '').rstrip(\n ).lstrip()\n except:\n self.loggger.info('description4 non-exist')\n csv_desc = ''\n try:\n article_number = offer.find_element_by_id(\n 'pManufacturer')\n csv_article_number = article_number.text.split(' ')[-1\n ].replace(',', '.')\n except:\n self.loggger.info('article number4 non-exist')\n csv_article_number = ''\n try:\n pimages = offer.find_elements_by_xpath('.//img')\n csv_image_url = []\n for pimage in pimages:\n image_url = pimage.get_attribute('src')\n if image_url not in csv_image_url:\n csv_image_url.append(image_url)\n except:\n self.loggger.info('image4 non-exist')\n if len(csv_image_url) == 1:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 2:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 3:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif len(csv_image_url) == 4:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list4[m] + '\\n')\n elif 
len(csv_image_url) == 5:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 6:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 7:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif len(csv_image_url) == 8:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 9:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 10:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n 
list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 11:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif len(csv_image_url) == 12:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 13:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 14:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4\n [m] + '\\n')\n elif 
len(csv_image_url) == 15:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 16:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 17:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' +\n list4[m] + '\\n')\n elif len(csv_image_url) >= 18:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list4_categories[m].split('/')[1] + ',' +\n list4_categories[m].split('/')[2] + ',' +\n list4_categories[m].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + csv_image_url[17] + ',' + ' ' + ',' +\n csv_stock + ',' + list4[m] + '\\n')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error4')\n file.close()\n self.driver.close()\n",
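The spider code in the fields above writes one hand-built file.write(...) branch per image count (1 through 18), duplicated across both product loops. Below is a minimal sketch, assuming the same 29-column row layout ('NEW', article number, three breadcrumb categories, heading, description, two prices, 18 image slots, stock, URL), of how that chain collapses once the image list is padded to a fixed width. write_product_row is a hypothetical helper name, and csv.writer is swapped in for the raw string concatenation used in the original; this is an illustration, not the dataset's own code.

    import csv

    MAX_IMAGES = 18  # the if/elif chain above tops out at 18 image columns


    def write_product_row(writer, article_number, categories, heading, desc,
                          price_new, price_old, image_urls, stock, url):
        # Pad (or truncate) the image URLs to a fixed column count so every
        # row has the same width, mirroring the ' ' placeholders used above.
        images = (list(image_urls) + [' '] * MAX_IMAGES)[:MAX_IMAGES]
        writer.writerow(['NEW', article_number, *categories, heading, desc,
                         price_new, price_old, *images, stock, url])


    # Usage sketch, replacing the 18-branch block inside both product loops:
    # with open('tekniknet.csv', 'a', newline='', errors='replace') as f:
    #     writer = csv.writer(f)
    #     write_product_row(writer, csv_article_number,
    #                       list3_categories[k].split('/')[1:4],
    #                       csv_heading, csv_desc, csv_price_new,
    #                       csv_price_old, csv_image_url, csv_stock, list3[k])

One design note: csv.writer quotes fields that contain commas, so the manual .replace(',', '.') and .replace(',', '-') sanitizing in the spider would likely become unnecessary, though it changes how existing rows are quoted on disk.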
"step-4": "import scrapy\nimport time\nimport os.path\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom tempfile import mkstemp\nfrom shutil import move\nfrom os import fdopen, remove\nfrom datetime import datetime\nimport logging\noutput_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')\nlog_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)\n\n\nclass ProductSpider(scrapy.Spider):\n name = 'tekniknet_new'\n allowed_domains = ['www.tekniknet.se']\n start_urls = ['https://www.tekniknet.se/#']\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n csv_image_url = []\n old_product_url = []\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located\n ((By.ID, 'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys('[email protected]')\n username = self.driver.find_element_by_id('password')\n username.send_keys('order88')\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open('tekniknet.csv') as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n remove('tekniknet.csv')\n move(abs_path, 'tekniknet.csv')\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n file = open('tekniknet.csv', 'a', errors='replace')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 = child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info(\n '*************************************************')\n self.loggger.info(link1)\n for i in range(0, len(list1) - 4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'\n ):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info(\n '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'\n )\n self.loggger.info(link2)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n 
subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n for child3 in wrapper2_1:\n link2_1 = child3.get_attribute('href')\n list5.append(link2_1)\n for n in range(0, len(list5)):\n self.driver.get(list5[n])\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1_1 = subcategory.find_elements_by_xpath(\n './/a')\n for child3_1 in wrapper2_1_1:\n if child3_1.text != 'Visa alla':\n link2_1_1 = child3_1.get_attribute('href')\n list2.append(link2_1_1)\n except:\n try:\n breadcrumbs2 = self.driver.find_element_by_id(\n 'breadcrumbs')\n categories2 = breadcrumbs2.find_elements_by_xpath(\n './/li')\n csv_categories2 = ''\n for category2 in categories2:\n csv_categories2 = (csv_categories2 + category2.\n text + '/')\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME,\n 'listProduct')))\n for wrapper2_2 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n wrapper2_3 = wrapper2_2.find_element_by_xpath(\n './/a')\n link2_2 = wrapper2_3.get_attribute('href')\n list4.append(link2_2)\n list4_categories.append(csv_categories2)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link2_2)\n self.loggger.info('error')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for j in range(0, len(list2)):\n try:\n self.loggger.info('**********-------------- ' + str(j) +\n ' ******************************')\n self.driver.get(list2[j])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n csv_categories1 = ''\n for category1 in categories1:\n csv_categories1 = csv_categories1 + category1.text + '/'\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'listProduct'))\n )\n for wrapper3 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n child_wrapper3 = wrapper3.find_element_by_xpath('.//a')\n link3 = child_wrapper3.get_attribute('href')\n list3.append(link3)\n list3_categories.append(csv_categories1)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link3)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for k in range(0, len(list3)):\n try:\n if list3[k] not in old_product_url:\n self.loggger.info('----------------------- ' + str\n (k) + ' ******************************')\n self.driver.get(list3[k])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading3 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock3 non-exist')\n csv_stock = ''\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = 
offer.find_element_by_class_name('priceOld')\n
                            csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price3 non-exist')\n
                            csv_price_old = ''\n
                            csv_price_new = ''\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description3 non-exist')\n
                        csv_desc = ''\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number3 non-exist')\n
                        csv_article_number = ''\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image3 non-exist')\n
                    # 9 leading fields, 18 image slots, EAN placeholder, stock, url\n
                    if len(csv_image_url) >= 1:\n
                        row = ['NEW', csv_article_number,\n
                            list3_categories[k].split('/')[1],\n
                            list3_categories[k].split('/')[2],\n
                            list3_categories[k].split('/')[3],\n
                            csv_heading, csv_desc,\n
                            csv_price_new, csv_price_old]\n
                        row += (csv_image_url + [' '] * 18)[:18]\n
                        row += [' ', csv_stock, list3[k]]\n
                        file.write(','.join(row) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error3')\n
        for m in range(0, len(list4)):\n
            try:\n
                if list4[m] not in old_product_url:\n
                    self.loggger.info('********************** ' + str(m) + ' ******************************')\n
                    self.driver.get(list4[m])\n
                    WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, 'breadcrumbs')))\n
                    offer = self.driver.find_element_by_id('productPageUpper')\n
                    try:\n
                        heading = offer.find_element_by_class_name('pHeader')\n
                        csv_heading = heading.text.replace(',', '.')\n
                    except:\n
                        self.loggger.info('heading4 non-exist')\n
                        csv_heading = ''\n
                    try:\n
                        stock = offer.find_element_by_class_name('instock')\n
                        csv_stock = stock.text\n
                    except:\n
                        csv_stock = 'Out of stock'\n
                        self.loggger.info('stock4 non-exist')\n
                    try:\n
                        price_new = offer.find_element_by_class_name('priceRegular')\n
                        csv_price_new = price_new.text.split(' ')[0]\n
                    except:\n
                        try:\n
                            price_new = offer.find_element_by_class_name('priceNew')\n
                            csv_price_new = price_new.text.split(' ')[0]\n
                            price_old = offer.find_element_by_class_name('priceOld')\n
                            csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price4 non-exist')\n
                            csv_price_new = ''\n
                            csv_price_old = ''\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description4 non-exist')\n
                        csv_desc = ''\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number4 non-exist')\n
                        csv_article_number = ''\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image4 non-exist')\n
                    # same 30-column row layout as the list3 loop above\n
                    if len(csv_image_url) >= 1:\n
                        row = ['NEW', csv_article_number,\n
                            list4_categories[m].split('/')[1],\n
                            list4_categories[m].split('/')[2],\n
                            list4_categories[m].split('/')[3],\n
                            csv_heading, csv_desc,\n
                            csv_price_new, csv_price_old]\n
                        row += (csv_image_url + [' '] * 18)[:18]\n
                        row += [' ', csv_stock, list4[m]]\n
                        file.write(','.join(row) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error4')\n
        file.close()\n
        self.driver.close()\n",
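Rows are joined with bare commas, which is why step-5 below strips commas from every field (heading.text.replace(',', '.'), the innerHTML .replace(',', '-')). A quoting-safe alternative sketch using the standard csv module; the sample values are placeholders, not data from the spider:

import csv

# csv.writer quotes any field that contains commas, quotes or newlines,
# so the per-field replace(',', ...) workarounds become unnecessary.
with open('tekniknet.csv', 'a', newline='', errors='replace') as f:
    writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
    writer.writerow(['NEW', 'ART-123', 'cat1', 'cat2', 'cat3',
                     'heading, with comma', 'description', '199', '249']
                    + [' '] * 18 + [' ', 'I lager', 'https://example.com/p/1'])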
"step-5": "import scrapy\nimport time\nimport os.path\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom tempfile import mkstemp\nfrom shutil import move\nfrom os import fdopen, remove\n\nfrom datetime import datetime\nimport logging\n\noutput_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')\nlog_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)\n\nclass ProductSpider(scrapy.Spider):\n name = \"tekniknet_new\"\n allowed_domains = ['www.tekniknet.se']\n start_urls = ['https://www.tekniknet.se/#']\n\n def __init__(self):\n # self.driver = webdriver.Chrome(\"./chromedriver.exe\")\n options = webdriver.ChromeOptions()\n # options.add_argument(\"--headless\")\n options.add_argument(\"--start-maximized\")\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n # Quiet down all the unnecessary logging. \n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n \n logging.getLogger('selenium.webdriver.remote.remote_connection').setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n\n logging.getLogger('selenium.webdriver.remote.remote_connection').addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n # article_number_list = []\n csv_image_url = []\n # file_exist = False\n old_product_url = []\n \n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys(\"[email protected]\")\n username = self.driver.find_element_by_id('password')\n username.send_keys(\"order88\")\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n\n #Create temp file\n fh, abs_path = mkstemp()\n with fdopen(fh,'w') as new_file:\n with open(\"tekniknet.csv\") as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n #Remove original file\n remove(\"tekniknet.csv\")\n #Move new file\n move(abs_path, \"tekniknet.csv\")\n\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n\n file = open(\"tekniknet.csv\", \"a\", errors ='replace')\n # file.write('OLD/NEW' + ',' + 'article number' + ',' + 'category1' + ',' + 'category2' + ',' + 'category3' + ',' + 'heading' + ',' + 'description' + ',' + 'current price' + ',' + 'previous price' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'EAN code' + ',' + 'stock' + ',' + 'product url' + '\\n')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 = 
child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info('*************************************************')\n self.loggger.info(link1)\n\n for i in range(0, len(list1)-4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\n self.loggger.info(link2)\n \n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))\n subcategory = self.driver.find_element_by_id('categorySubCategories')\n wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n for child3 in wrapper2_1:\n link2_1 = child3.get_attribute('href')\n list5.append(link2_1)\n for n in range(0, len(list5)):\n self.driver.get(list5[n])\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))\n subcategory = self.driver.find_element_by_id('categorySubCategories')\n wrapper2_1_1 = subcategory.find_elements_by_xpath('.//a')\n for child3_1 in wrapper2_1_1:\n if child3_1.text != 'Visa alla':\n link2_1_1 = child3_1.get_attribute('href')\n list2.append(link2_1_1)\n except:\n try:\n breadcrumbs2 = self.driver.find_element_by_id('breadcrumbs')\n categories2 = breadcrumbs2.find_elements_by_xpath('.//li')\n csv_categories2 = ''\n for category2 in categories2:\n csv_categories2 = csv_categories2 + category2.text + '/'\n \n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))\n for wrapper2_2 in self.driver.find_elements_by_class_name('listProduct'):\n wrapper2_3 = wrapper2_2.find_element_by_xpath(\".//a\")\n link2_2 = wrapper2_3.get_attribute('href')\n list4.append(link2_2)\n list4_categories.append(csv_categories2)\n self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n self.loggger.info(link2_2)\n self.loggger.info('error')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n\n # for m in range(0, 5): IF YOU WANT TO DOWNLOAD CERTAIN PRODUCTS, YOU CAN WRITE LIKE THIS.\n # for m in range(0, len(list2)): IF YOU WANT TO DOWNLOAD ALL PRODUCTS, YOU CAN WRITE LIKE THIS.\n for j in range(0, len(list2)):\n try:\n self.loggger.info('**********-------------- ' + str(j) + ' ******************************')\n self.driver.get(list2[j])\n WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))\n\n breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n csv_categories1 = ''\n for category1 in categories1:\n csv_categories1 = csv_categories1 + category1.text + '/'\n\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))\n for wrapper3 in self.driver.find_elements_by_class_name('listProduct'):\n child_wrapper3 = wrapper3.find_element_by_xpath(\".//a\")\n link3 = child_wrapper3.get_attribute('href')\n list3.append(link3)\n list3_categories.append(csv_categories1)\n self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n self.loggger.info(link3)\n except Exception as e:\n 
self.loggger.info(e)\n self.loggger.info('error')\n \n for k in range(0, len(list3)):\n try:\n if list3[k] not in old_product_url:\n self.loggger.info('----------------------- ' + str(k) + ' ******************************')\n self.driver.get(list3[k])\n WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))\n\n # breadcrumbs = self.driver.find_element_by_id('breadcrumbs')\n # categories = breadcrumbs.find_elements_by_xpath('.//a')\n # for category in categories:\n # csv_categories.append(category.text)\n\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading3 non-exist')\n csv_heading = ''\n\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock3 non-exist')\n csv_stock = ''\n\n try:\n price_new = offer.find_element_by_class_name('priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name('priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n \n price_old = offer.find_element_by_class_name('priceOld')\n csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price3 non-exist')\n csv_price_old = ''\n csv_price_new = ''\n\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n except:\n self.loggger.info('description3 non-exist')\n csv_desc = ''\n\n try:\n article_number = offer.find_element_by_id('pManufacturer')\n csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n except:\n self.loggger.info('article number3 non-exist')\n csv_article_number = ''\n \n try:\n pimages = offer.find_elements_by_xpath('.//img')\n csv_image_url = []\n for pimage in pimages:\n image_url = pimage.get_attribute('src')\n if image_url not in csv_image_url:\n csv_image_url.append(image_url)\n except:\n self.loggger.info('image3 non-exist')\n\n ######################################### CSV File Writing #########################################\n # if csv_article_number not in article_number_list:\n if len(csv_image_url) == 1:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 2:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 3:\n file.write('NEW' + ',' + csv_article_number + ',' 
+ list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 4:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 5:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 6:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 7:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 8:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + 
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 9:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 10:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 11:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 12:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 13:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' + 
',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 14:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 15:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 16:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 17:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) >= 18:\n file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + 
csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + csv_image_url[17] + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n # article_number_list.append(csv_article_number)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error3')\n\n # for m in range(0, 20): IF YOU WANT TO DOWNLOAD CERTAIN PRODUCTS, YOU CAN WRITE LIKE THIS.\n # for m in range(0, len(list4)): IF YOU WANT TO DOWNLOAD ALL PRODUCTS, YOU CAN WRITE LIKE THIS.\n for m in range(0, len(list4)):\n try:\n if list4[m] not in old_product_url:\n self.loggger.info('********************** ' + str(k) + ' ******************************')\n self.driver.get(list4[m])\n WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))\n\n # breadcrumbs = self.driver.find_element_by_id('breadcrumbs')\n # categories = breadcrumbs.find_elements_by_xpath('.//a')\n # for category in categories:\n # csv_categories.append(category.text)\n\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading4 non-exist')\n csv_heading = ''\n\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock4 non-exist')\n\n try:\n price_new = offer.find_element_by_class_name('priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name('priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n \n price_old = offer.find_element_by_class_name('priceOld')\n csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price4 non-exist')\n csv_price_new = ''\n csv_price_old = ''\n\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n except:\n self.loggger.info('description4 non-exist')\n csv_desc = ''\n\n try:\n article_number = offer.find_element_by_id('pManufacturer')\n csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n except:\n self.loggger.info('article number4 non-exist')\n csv_article_number = ''\n \n try:\n pimages = offer.find_elements_by_xpath('.//img')\n csv_image_url = []\n for pimage in pimages:\n image_url = pimage.get_attribute('src')\n if image_url not in csv_image_url:\n csv_image_url.append(image_url)\n except:\n self.loggger.info('image4 non-exist')\n\n ######################################### CSV File Writing #########################################\n # if csv_article_number not in article_number_list:\n if len(csv_image_url) == 1:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif 
len(csv_image_url) == 2:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 3:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 4:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 5:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 6:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 7:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + 
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 8:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 9:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 10:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 11:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 12:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + 
list4[m] + '\\n')\n elif len(csv_image_url) == 13:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 14:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 15:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 16:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) == 17:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + 
csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n elif len(csv_image_url) >= 18:\n file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + csv_image_url[17] + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\\n')\n # article_number_list.append(csv_article_number)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error4')\n \n file.close()\n self.driver.close()\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
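The scraper record above reserves 19 image columns per CSV row, so each branch of the elif chain keyed on len(csv_image_url) pads the unused slots with blank fields by hand. A minimal sketch of the same row assembly done once with list padding; build_row and IMAGE_COLS are illustrative names, not part of the original record:

IMAGE_COLS = 19

def build_row(prefix_fields, image_urls, stock, product_url):
    # Keep at most IMAGE_COLS urls, then pad with single spaces so every
    # row carries the same number of comma-separated fields.
    images = (image_urls[:IMAGE_COLS] + [' '] * IMAGE_COLS)[:IMAGE_COLS]
    return ','.join(prefix_fields + images + [stock, product_url]) + '\n'

Writing through the csv module instead of manual joins would also handle commas inside headings, which the record currently works around by replacing them with dots.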
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if response.info().get('Content-Type') == 'application/x-gzip':
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
<|reserved_special_token_0|>
f.write(data)
f.close()
print(data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'http://api.syosetu.com/novelapi/api/'
get = {}
get['gzip'] = 5
get['out'] = 'json'
get['of'] = 't-s-w'
get['lim'] = 500
get['type'] = 'er'
url_values = urllib.urlencode(get)
request = urllib2.Request(url + '?' + url_values)
response = urllib2.urlopen(request)
if response.info().get('Content-Type') == 'application/x-gzip':
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
f = open('text.txt', 'w')
f.write(data)
f.close()
print(data)
<|reserved_special_token_1|>
from StringIO import StringIO
import gzip
import urllib2
import urllib
url = 'http://api.syosetu.com/novelapi/api/'
get = {}
get['gzip'] = 5
get['out'] = 'json'
get['of'] = 't-s-w'
get['lim'] = 500
get['type'] = 'er'
url_values = urllib.urlencode(get)
request = urllib2.Request(url + '?' + url_values)
response = urllib2.urlopen(request)
if response.info().get('Content-Type') == 'application/x-gzip':
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
f = open('text.txt', 'w')
f.write(data)
f.close()
print(data)
<|reserved_special_token_1|>
from StringIO import StringIO
import gzip
import urllib2
import urllib
url="http://api.syosetu.com/novelapi/api/"
get={}
get["gzip"]=5
get["out"]="json"
get["of"]="t-s-w"
get["lim"]=500
get["type"]="er"
url_values = urllib.urlencode(get)
request = urllib2.Request(url+"?"+url_values)
response = urllib2.urlopen(request)
if response.info().get('Content-Type') == 'application/x-gzip':
buf = StringIO( response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
f = open('text.txt', 'w')
f.write(data)
f.close()
print(data)
|
flexible
|
{
"blob_id": "4b622c7f9b5caa7f88367dd1fdb0bb9e4a81477b",
"index": 2338,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif response.info().get('Content-Type') == 'application/x-gzip':\n buf = StringIO(response.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\nelse:\n data = response.read()\n<mask token>\nf.write(data)\nf.close()\nprint(data)\n",
"step-3": "<mask token>\nurl = 'http://api.syosetu.com/novelapi/api/'\nget = {}\nget['gzip'] = 5\nget['out'] = 'json'\nget['of'] = 't-s-w'\nget['lim'] = 500\nget['type'] = 'er'\nurl_values = urllib.urlencode(get)\nrequest = urllib2.Request(url + '?' + url_values)\nresponse = urllib2.urlopen(request)\nif response.info().get('Content-Type') == 'application/x-gzip':\n buf = StringIO(response.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\nelse:\n data = response.read()\nf = open('text.txt', 'w')\nf.write(data)\nf.close()\nprint(data)\n",
"step-4": "from StringIO import StringIO\nimport gzip\nimport urllib2\nimport urllib\nurl = 'http://api.syosetu.com/novelapi/api/'\nget = {}\nget['gzip'] = 5\nget['out'] = 'json'\nget['of'] = 't-s-w'\nget['lim'] = 500\nget['type'] = 'er'\nurl_values = urllib.urlencode(get)\nrequest = urllib2.Request(url + '?' + url_values)\nresponse = urllib2.urlopen(request)\nif response.info().get('Content-Type') == 'application/x-gzip':\n buf = StringIO(response.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\nelse:\n data = response.read()\nf = open('text.txt', 'w')\nf.write(data)\nf.close()\nprint(data)\n",
"step-5": "from StringIO import StringIO\nimport gzip\nimport urllib2\nimport urllib\n\nurl=\"http://api.syosetu.com/novelapi/api/\"\n\nget={}\nget[\"gzip\"]=5\nget[\"out\"]=\"json\"\nget[\"of\"]=\"t-s-w\"\nget[\"lim\"]=500\nget[\"type\"]=\"er\"\n\nurl_values = urllib.urlencode(get)\n\nrequest = urllib2.Request(url+\"?\"+url_values)\nresponse = urllib2.urlopen(request)\n\nif response.info().get('Content-Type') == 'application/x-gzip':\n buf = StringIO( response.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\nelse:\n data = response.read()\n\n\nf = open('text.txt', 'w')\nf.write(data)\nf.close()\n\nprint(data)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
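The syosetu record above is Python 2 code (StringIO, urllib2, urllib.urlencode). A sketch of the same request on the Python 3 standard library, assuming the API still marks compressed payloads with the same Content-Type header:

import gzip
from urllib.parse import urlencode
from urllib.request import Request, urlopen

url = 'http://api.syosetu.com/novelapi/api/'
params = urlencode({'gzip': 5, 'out': 'json', 'of': 't-s-w',
                    'lim': 500, 'type': 'er'})
response = urlopen(Request(url + '?' + params))
raw = response.read()
if response.headers.get('Content-Type') == 'application/x-gzip':
    raw = gzip.decompress(raw)  # replaces the StringIO + GzipFile pair
with open('text.txt', 'wb') as f:
    f.write(raw)
print(raw.decode('utf-8'))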
<|reserved_special_token_0|>
class ContentCategory(BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
<|reserved_special_token_0|>
class Content(BaseModel):
"""广告内容"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ContentCategory(BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
"""广告内容"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ContentCategory(BaseModel):
<|reserved_special_token_0|>
name = models.CharField(verbose_name='名称', max_length=50)
key = models.CharField(verbose_name='类别键名', max_length=50)
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
"""广告内容"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ContentCategory(BaseModel):
"""广告内容类别"""
name = models.CharField(verbose_name='名称', max_length=50)
key = models.CharField(verbose_name='类别键名', max_length=50)
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
"""广告内容"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
<|reserved_special_token_1|>
from django.db import models
from utils.models import BaseModel
# Create your models here.
class ContentCategory(BaseModel):
'''广告内容类别'''
name = models.CharField(verbose_name='名称',max_length=50)
key = models.CharField(verbose_name='类别键名',max_length=50)
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
'''广告内容'''
category = models.ForeignKey(ContentCategory,on_delete=models.PROTECT,verbose_name='类别')
title = models.CharField(verbose_name='标题',max_length=100)
url = models.CharField(verbose_name='内容链接',max_length=300)
image = models.ImageField(verbose_name='图片',null=True,blank=True)
text = models.TextField(verbose_name='内容',null=True,blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示',default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
|
flexible
|
{
"blob_id": "fd96bf5595ce6ec1f95d0f7a9d1c4ff582826ac0",
"index": 1439,
"step-1": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n <mask token>\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-2": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-3": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n <mask token>\n name = models.CharField(verbose_name='名称', max_length=50)\n key = models.CharField(verbose_name='类别键名', max_length=50)\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-4": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n \"\"\"广告内容类别\"\"\"\n name = models.CharField(verbose_name='名称', max_length=50)\n key = models.CharField(verbose_name='类别键名', max_length=50)\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-5": "from django.db import models\nfrom utils.models import BaseModel\n\n# Create your models here.\nclass ContentCategory(BaseModel):\n '''广告内容类别'''\n name = models.CharField(verbose_name='名称',max_length=50)\n key = models.CharField(verbose_name='类别键名',max_length=50)\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\nclass Content(BaseModel):\n '''广告内容'''\n category = models.ForeignKey(ContentCategory,on_delete=models.PROTECT,verbose_name='类别')\n title = models.CharField(verbose_name='标题',max_length=100)\n url = models.CharField(verbose_name='内容链接',max_length=300)\n image = models.ImageField(verbose_name='图片',null=True,blank=True)\n text = models.TextField(verbose_name='内容',null=True,blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示',default=True)\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
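The two models above key every advertisement to a category, so assembling a page reduces to one dict build. A usage sketch, assuming the models live in a Django app named contents; load_contents is an illustrative name:

from contents.models import Content, ContentCategory

def load_contents():
    # Map each category key to its visible ads, ordered for display.
    contents = {}
    for cat in ContentCategory.objects.all():
        contents[cat.key] = Content.objects.filter(
            category=cat, status=True).order_by('sequence')
    return contents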
import os
import RPi.GPIO as GPIO
from google.cloud import firestore
import time
############Explicit Credential environment
path="/home/pi/Desktop/Parking.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] =path
#GPIO starts
s1=2
s2=21
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(s1,GPIO.IN)
GPIO.setup(s2,GPIO.IN)
#firestore initialization
db = firestore.Client()
doc_ref_s1 = db.collection(u'sensors').document(u'sensor1')
doc_ref_s2 = db.collection(u'sensors').document(u'sensor2')
#here starts main
data1=0
data2=0
counter=0
while 1:
if(GPIO.input(s1)==False): #car found in slot 1
data1=1
counter+=1
else: data1=0
print("Received from 1: %s" % data1)
###Now starts for sensor 2
if(GPIO.input(s2)==False): #car found in slot 2
data2=1
counter-=1
else: data2=0
print("Received from 2: %s" % data2)
if(counter>8):
counter=8
elif(counter<0):
counter=0
print("Counter= %s" % counter)
doc_ref_s1.update({
u'priority': counter
})
|
normal
|
{
"blob_id": "e1cc4e17bffcbbae3e7785e4c55acde167a8a50a",
"index": 6482,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(s1, GPIO.IN)\nGPIO.setup(s2, GPIO.IN)\n<mask token>\nwhile 1:\n if GPIO.input(s1) == False:\n data1 = 1\n counter += 1\n else:\n data1 = 0\n print('Received from 1: %s' % data1)\n if GPIO.input(s2) == False:\n data2 = 1\n counter -= 1\n else:\n data2 = 0\n print('Received from 2: %s' % data2)\n if counter > 8:\n counter = 8\n elif counter < 0:\n counter = 0\n print('Counter= %s' % counter)\n doc_ref_s1.update({u'priority': counter})\n",
"step-3": "<mask token>\npath = '/home/pi/Desktop/Parking.json'\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = path\ns1 = 2\ns2 = 21\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(s1, GPIO.IN)\nGPIO.setup(s2, GPIO.IN)\ndb = firestore.Client()\ndoc_ref_s1 = db.collection(u'sensors').document(u'sensor1')\ndoc_ref_s2 = db.collection(u'sensors').document(u'sensor2')\ndata1 = 0\ndata2 = 0\ncounter = 0\nwhile 1:\n if GPIO.input(s1) == False:\n data1 = 1\n counter += 1\n else:\n data1 = 0\n print('Received from 1: %s' % data1)\n if GPIO.input(s2) == False:\n data2 = 1\n counter -= 1\n else:\n data2 = 0\n print('Received from 2: %s' % data2)\n if counter > 8:\n counter = 8\n elif counter < 0:\n counter = 0\n print('Counter= %s' % counter)\n doc_ref_s1.update({u'priority': counter})\n",
"step-4": "import os\nimport RPi.GPIO as GPIO\nfrom google.cloud import firestore\nimport time\npath = '/home/pi/Desktop/Parking.json'\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = path\ns1 = 2\ns2 = 21\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(s1, GPIO.IN)\nGPIO.setup(s2, GPIO.IN)\ndb = firestore.Client()\ndoc_ref_s1 = db.collection(u'sensors').document(u'sensor1')\ndoc_ref_s2 = db.collection(u'sensors').document(u'sensor2')\ndata1 = 0\ndata2 = 0\ncounter = 0\nwhile 1:\n if GPIO.input(s1) == False:\n data1 = 1\n counter += 1\n else:\n data1 = 0\n print('Received from 1: %s' % data1)\n if GPIO.input(s2) == False:\n data2 = 1\n counter -= 1\n else:\n data2 = 0\n print('Received from 2: %s' % data2)\n if counter > 8:\n counter = 8\n elif counter < 0:\n counter = 0\n print('Counter= %s' % counter)\n doc_ref_s1.update({u'priority': counter})\n",
"step-5": "import os\nimport RPi.GPIO as GPIO\nfrom google.cloud import firestore\nimport time \n\n############Explicit Credential environment\npath=\"/home/pi/Desktop/Parking.json\"\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] =path\n\n#GPIO starts\ns1=2\ns2=21\nGPIO.setmode(GPIO.BCM) \nGPIO.setwarnings(False)\nGPIO.setup(s1,GPIO.IN)\nGPIO.setup(s2,GPIO.IN)\n\n\n#firestore initialization\ndb = firestore.Client()\ndoc_ref_s1 = db.collection(u'sensors').document(u'sensor1')\t\t\t\ndoc_ref_s2 = db.collection(u'sensors').document(u'sensor2')\n#here starts main\ndata1=0\ndata2=0\ncounter=0\nwhile 1:\n\t\n\tif(GPIO.input(s1)==False): #car found in slot 1\n\t\tdata1=1\n\t\tcounter+=1\n\telse: data1=0\n \n\tprint(\"Received from 1: %s\" % data1)\n\t###Now starts for sensor 2\t\n\tif(GPIO.input(s2)==False): #car found in slot 2\n\t\tdata2=1\n\t\tcounter-=1\n\telse: data2=0\n\tprint(\"Received from 2: %s\" % data2)\n\tif(counter>8):\n\t\tcounter=8\n\telif(counter<0):\n\t\tcounter=0\n\tprint(\"Counter= %s\" % counter)\n\tdoc_ref_s1.update({\n\t\tu'priority': counter\n\t\t})\n\t\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
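The parking record polls both pins in a tight loop, so a car that stays in slot 1 keeps incrementing the counter on every pass until the clamp at 8 stops it. A sketch of the same counting driven by edge callbacks, which fire once per arrival or departure; the pins and clamps come from the record, while the 200 ms debounce is an assumption:

import signal
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(2, GPIO.IN)
GPIO.setup(21, GPIO.IN)

counter = 0

def car_arrived(channel):
    global counter
    counter = min(counter + 1, 8)  # same upper clamp as the record

def car_left(channel):
    global counter
    counter = max(counter - 1, 0)  # same lower clamp as the record

# The input reads False while a car is present, so arrival is a falling edge.
GPIO.add_event_detect(2, GPIO.FALLING, callback=car_arrived, bouncetime=200)
GPIO.add_event_detect(21, GPIO.FALLING, callback=car_left, bouncetime=200)

signal.pause()  # keep the process alive for the callbacks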
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from WeatherDL.data_maker import dataset_maker
from WeatherDL.model_maker import model_3
# Extract data from data_maker
X, y = dataset_maker(window=5, forecast_day=1)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, shuffle=False)
# Open model from model_maker
model = model_3((5, 8, 20, 6))
print(model.summary())
# Fit model, and extract training & validation metrics
history = model.fit(X_train, y_train,
validation_data=(X_test, y_test),
batch_size=5,
epochs=30,
verbose=2,
shuffle=False)
# Prediction
y_pred = model.predict(X_test)
# Data Visualization
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('MAE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.plot(history.history['mean_absolute_percentage_error'])
plt.plot(history.history['val_mean_absolute_percentage_error'])
plt.title('MAPE')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
|
normal
|
{
"blob_id": "011dd579bb076ec094e9e3085aa321883c484f1c",
"index": 5296,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(model.summary())\n<mask token>\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-3": "<mask token>\nX, y = dataset_maker(window=5, forecast_day=1)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n shuffle=False)\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\nhistory = model.fit(X_train, y_train, validation_data=(X_test, y_test),\n batch_size=5, epochs=30, verbose=2, shuffle=False)\ny_pred = model.predict(X_test)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom WeatherDL.data_maker import dataset_maker\nfrom WeatherDL.model_maker import model_3\nX, y = dataset_maker(window=5, forecast_day=1)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n shuffle=False)\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\nhistory = model.fit(X_train, y_train, validation_data=(X_test, y_test),\n batch_size=5, epochs=30, verbose=2, shuffle=False)\ny_pred = model.predict(X_test)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nfrom WeatherDL.data_maker import dataset_maker\nfrom WeatherDL.model_maker import model_3\n\n# Extract data from data_maker\nX, y = dataset_maker(window=5, forecast_day=1)\n(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, shuffle=False)\n\n# Open model from model_maker\nmodel = model_3((5, 8, 20, 6))\nprint(model.summary())\n\n# Fit model, and extract training & validation metrics\nhistory = model.fit(X_train, y_train,\n validation_data=(X_test, y_test),\n batch_size=5,\n epochs=30,\n verbose=2,\n shuffle=False)\n\n# Prediction\ny_pred = model.predict(X_test)\n\n# Data Visualization\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('MSE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_error'])\nplt.plot(history.history['val_mean_absolute_error'])\nplt.title('MAE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\nplt.plot(history.history['mean_absolute_percentage_error'])\nplt.plot(history.history['val_mean_absolute_percentage_error'])\nplt.title('MAPE')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
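The training record fixes 30 epochs and, appropriately for a windowed time series, keeps shuffle=False in both the split and the fit. A sketch of the same fit with early stopping added, assuming the Keras version behind model_maker supports restore_best_weights:

from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=5,
                           restore_best_weights=True)
history = model.fit(X_train, y_train,
                    validation_data=(X_test, y_test),
                    batch_size=5, epochs=30, verbose=2,
                    shuffle=False, callbacks=[early_stop])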
""" mupub module.
"""
__all__ = (
'__title__', '__summary__', '__version__',
'__author__', '__license__', '__copyright__',
)
__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'
"""Versioning:
This utility follows a MAJOR . MINOR . EDIT format. Upon a major
release, the MAJOR number is incremented and the MINOR is zeroed.
During development of an upcoming release, the MINOR number may be
incremented.
"""
__version__ = '1.0.8'
__author__ = 'Glen Larsen, Chris Sawer'
__author_email__= '[email protected]'
__uri__ = 'http://mutopiaproject.org/'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'
from .assets import collect_assets
from .commands.build import build
from .commands.check import check
from .commands.init import init
from .commands.tag import tag
from .commands.clean import clean
from .config import CONFIG_DICT, CONFIG_DIR, getDBPath
from .config import test_config, saveConfig
from .core import MUTOPIA_BASE, FTP_BASE, URL_BASE
from .core import id_from_footer
from .exceptions import BadConfiguration, IncompleteBuild, TagProcessException
from .header import Loader, LYLoader, VersionLoader
from .header import RawLoader, Header, REQUIRED_FIELDS
from .header import find_header
from .lily import LyLocator, LyVersion
from .validate import Validator, DBValidator, in_repository
from .tagedit import tag_header, tag_file
from .rdfu import NS, MuRDF
from .utils import resolve_input,resolve_lysfile
|
normal
|
{
"blob_id": "eabf06481509962652812af67ad59da5cfe30fae",
"index": 1,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ('__title__', '__summary__', '__version__', '__author__',\n '__license__', '__copyright__')\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n<mask token>\n__version__ = '1.0.8'\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__ = '[email protected]'\n__uri__ = 'http://mutopiaproject.org/'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\n<mask token>\n",
"step-3": "<mask token>\n__all__ = ('__title__', '__summary__', '__version__', '__author__',\n '__license__', '__copyright__')\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n<mask token>\n__version__ = '1.0.8'\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__ = '[email protected]'\n__uri__ = 'http://mutopiaproject.org/'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\nfrom .assets import collect_assets\nfrom .commands.build import build\nfrom .commands.check import check\nfrom .commands.init import init\nfrom .commands.tag import tag\nfrom .commands.clean import clean\nfrom .config import CONFIG_DICT, CONFIG_DIR, getDBPath\nfrom .config import test_config, saveConfig\nfrom .core import MUTOPIA_BASE, FTP_BASE, URL_BASE\nfrom .core import id_from_footer\nfrom .exceptions import BadConfiguration, IncompleteBuild, TagProcessException\nfrom .header import Loader, LYLoader, VersionLoader\nfrom .header import RawLoader, Header, REQUIRED_FIELDS\nfrom .header import find_header\nfrom .lily import LyLocator, LyVersion\nfrom .validate import Validator, DBValidator, in_repository\nfrom .tagedit import tag_header, tag_file\nfrom .rdfu import NS, MuRDF\nfrom .utils import resolve_input, resolve_lysfile\n",
"step-4": "\"\"\" mupub module.\n\"\"\"\n\n__all__ = (\n '__title__', '__summary__', '__version__',\n '__author__', '__license__', '__copyright__',\n)\n\n\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n\n\"\"\"Versioning:\nThis utility follows a MAJOR . MINOR . EDIT format. Upon a major\nrelease, the MAJOR number is incremented and the MINOR is zeroed.\nDuring development of an upcoming release, the MINOR number may be\nincremented.\n\n\"\"\"\n__version__ = '1.0.8'\n\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__= '[email protected]'\n__uri__ = 'http://mutopiaproject.org/'\n\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\n\nfrom .assets import collect_assets\nfrom .commands.build import build\nfrom .commands.check import check\nfrom .commands.init import init\nfrom .commands.tag import tag\nfrom .commands.clean import clean\nfrom .config import CONFIG_DICT, CONFIG_DIR, getDBPath\nfrom .config import test_config, saveConfig\nfrom .core import MUTOPIA_BASE, FTP_BASE, URL_BASE\nfrom .core import id_from_footer\nfrom .exceptions import BadConfiguration, IncompleteBuild, TagProcessException\nfrom .header import Loader, LYLoader, VersionLoader\nfrom .header import RawLoader, Header, REQUIRED_FIELDS\nfrom .header import find_header\nfrom .lily import LyLocator, LyVersion\nfrom .validate import Validator, DBValidator, in_repository\nfrom .tagedit import tag_header, tag_file\nfrom .rdfu import NS, MuRDF\nfrom .utils import resolve_input,resolve_lysfile\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
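Because the mupub __init__ above re-exports its submodules, callers see one flat namespace. A minimal sketch of what that enables:

import mupub

print(mupub.__version__)   # '1.0.8', per the module header
print(mupub.MUTOPIA_BASE)  # re-exported from mupub.core
# Commands such as mupub.build and mupub.check are likewise importable
# straight off the package.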
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Boolean Exercise')
print(False or False)
print(False and False)
print(not True or not False)
|
flexible
|
{
"blob_id": "2385882f040ef4bd0a3611bebfbb2ae5b3cd1dc6",
"index": 4204,
"step-1": "<mask token>\n",
"step-2": "print('Boolean Exercise')\nprint(False or False)\nprint(False and False)\nprint(not True or not False)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
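Expected output of the exercise above — not binds tighter than or, so the last line reduces to False or True:

Boolean Exercise
False
False
True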
<|reserved_special_token_0|>
def globalVariableCheck(debug=False):
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
<|reserved_special_token_0|>
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
<|reserved_special_token_0|>
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
<|reserved_special_token_0|>
def findVintage(rec, fldWine, debug=False):
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
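# Usage sketch, as comments so the module body is unchanged; the sample
# record is an assumption, not data from this file:
#   rec = {'wine': 'Caymus Cabernet Sauvignon 750ml 2018'}
#   findVintage(rec, 'wine')  # -> '2018' if one of the vintageLookup
#                             # regexes captures the four-digit year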
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
'bwgl:value is in ignoreLookupGrape - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
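# Shape of the structure returned above (illustrative values):
#   wgLookup = {winery: {grape: [[qualifier, ...], ...]}}
# Each winery/grape token list is sorted longest-first, so the most
# specific match wins in findAddAttribWgLookup below.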
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
print('faawl:Change from:', valuematchset, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def globalVariableCheck(debug=False):
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
<|reserved_special_token_0|>
def findQualifier(wine, debug=False):
for val, reSearch in reQualLookup:
if reSearch.search(wine):
if debug:
print('findQualifier:matched-returning:', val)
return val
if debug:
print('findQualifier:no-match-returning:', None)
return None
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
def findLiquor(rec, winery, fldWine, debug=False):
for liquor, reLiquor in liquorLookup[winery]:
if debug:
print('fl:checking liquor:', liquor)
if reLiquor.search(rec[fldWine]):
if debug:
print('fl:liquor match found:', liquor)
return liquor, reLiquor
return None, None
def findGrapeByRegex(rec, fldWine, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fgbr:grape:', grape)
if grape is not None and reGrape.search(rec[fldWine]):
if debug:
print('fgbr:grape match found:', grape)
return grape, reGrape
return None, None
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
def findGrapeByStr(rec, fldWineDescr, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fg:grape:', grape)
grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,
debug=debug)
if grape:
return grape, other
return None, []
def findVintage(rec, fldWine, debug=False):
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
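    """Scan the master wine list and build wgLookup[winery][grape] -> list of
    'other' attribute word-lists (vineyard, qualifier, etc.) observed with
    that winery/grape pair; the lists are sorted longest-first so multi-word
    attribute sets are tried before shorter ones when matching."""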
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
                        'bwgl:value is in ignoreGrapeLookup - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
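    """Match rec[fldWine] against each attribute word-list in
    wgLookup[winery][value], falling back to wineAbbrLookup regexes for
    abbreviated tokens. A fully matched multi-word set is returned
    immediately; single-word matches are collected and the winner is picked
    via defaultorderlist, with a special case mapping Mondavi to ['Tok'].
    Returns [] when nothing matches."""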
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
            print('faawl:Change from:', singlematch, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
<|reserved_special_token_0|>
def setDigitFld2Value(wines, fld, value, debug=False):
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
def updateFileOptionDictCheck(optiondict, wines, header, debug=False):
if optiondict['fldWineDescr'] not in wines[0]:
if debug:
print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'
, optiondict['fldWineDescr'])
if 'cnt' in wines[0]:
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
print(
'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'
)
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
print('could not find fldWineDescr in wines[0]-aborting:',
optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
            error = wines[0][optiondict['fldWineDescr']]  # force a KeyError to abort
if False and optiondict['fldWineDescr'] == 'winedescr':
if not optiondict['fldWineDescrMatch']:
optiondict['fldWineDescrMatch'] = 'same'
print('setting value fldWineDescrMatch to: same')
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
file_path, base_filename, file_ext = kvutil.filename_split(optiondict
['csvfile_update_in'])
backupfile = kvutil.filename_proper(base_filename + optiondict[
'backupfile_ext'], file_path)
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
if optiondict['fldWineDescrNew'] == 'cnt':
optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',
'wine', 'winesrt']
elif optiondict['fldWineDescrMatch']:
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict
['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]
else:
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:
]
print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[
'csvdictkeys'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def globalVariableCheck(debug=False):
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
def wineLookupByName(nameLookup, lookupStr, other, msg, wineAbbrLookup=None,
debug=False):
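    """Look up a wine name from nameLookup inside lookupStr using
    word-boundary regexes, with a wineAbbrLookup fallback; a matched name is
    also removed from the 'other' list. Returns '' when nameLookup (or a None
    entry) says no grape applies, the matched name on success, or None when
    nothing matches."""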
funcname = 'wineLookupByName:' + msg + ':'
if debug:
print(funcname + 'nameLookup:', nameLookup)
if nameLookup is None:
if debug:
print(funcname + 'match: value is none - continue on')
return ''
for name in nameLookup:
if debug:
print(funcname + 'match-name:', name)
if name is None:
if debug:
print(funcname +
'name-matched: value is none - continue on:pass back blank'
)
return ''
reName = re.compile('\\b' + name + '\\b', re.IGNORECASE)
if reName.search(lookupStr):
if debug:
print(funcname + 'name-MATCHED:', name)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if wineAbbrLookup and name in wineAbbrLookup:
reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)
if debug:
print(funcname + 'Abbr-match-name:', name)
if reName.search(lookupStr):
if debug:
print(funcname + 'Abbr-name-MATCHED:', wineAbbrLookup[name]
)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if debug:
print(funcname + 'name match not found:set to blank')
return None
def findQualifier(wine, debug=False):
for val, reSearch in reQualLookup:
if reSearch.search(wine):
if debug:
print('findQualifier:matched-returning:', val)
return val
if debug:
print('findQualifier:no-match-returning:', None)
return None
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
def findLiquor(rec, winery, fldWine, debug=False):
for liquor, reLiquor in liquorLookup[winery]:
if debug:
print('fl:checking liquor:', liquor)
if reLiquor.search(rec[fldWine]):
if debug:
print('fl:liquor match found:', liquor)
return liquor, reLiquor
return None, None
def findGrapeByRegex(rec, fldWine, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fgbr:grape:', grape)
if grape is not None and reGrape.search(rec[fldWine]):
if debug:
print('fgbr:grape match found:', grape)
return grape, reGrape
return None, None
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
def findGrapeByStr(rec, fldWineDescr, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fg:grape:', grape)
grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,
debug=debug)
if grape:
return grape, other
return None, []
def findVintage(rec, fldWine, debug=False):
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
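    """Scan the master wine list and build wgLookup[winery][grape] -> list of
    'other' attribute word-lists (vineyard, qualifier, etc.) observed with
    that winery/grape pair; the lists are sorted longest-first so multi-word
    attribute sets are tried before shorter ones when matching."""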
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
                        'bwgl:value is in ignoreGrapeLookup - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
            print('faawl:Change from:', singlematch, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
def setWineryDescrFromWineryGrapeLookup(wgLookup, wines, fldWineDescr=
'winedescr', fldWine='wine', fldWineDescrNew='winedescrnew',
fldWineDescrMatch=False, debug=False):
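    """Rebuild each record's wine description from the wgLookup built off the
    master file: detect winery, grape/wine/liquor, vintage, case, bottle size
    and qualifier, resolve extra attributes via findAddAttribWgLookup, and
    write the joined result to rec[fldWineDescrNew], optionally flagging
    old-vs-new equality in fldWineDescrMatch."""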
if debug:
print('\n' * 10,
'START WINEDESCR SETTING HERE ---------------------------------------------'
)
for rec in wines:
        winery = grape = wine = vintage = case = size = liquor = nongrape = qual = None
winematchset = grapematchset = []
if debug:
try:
print('setWinery:fldWine:', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if fldWineDescrNew not in rec:
rec[fldWineDescrNew] = rec[fldWineDescr]
winery, reWinery = findWinery(rec, None, None, fldWine, debug=debug)
if winery is None:
if debug:
print('setWinery:winery not found-next record:' + rec[fldWine])
continue
elif winery not in wgLookup:
if debug:
print('setWinery:winery not in wgLookup:', winery)
continue
grape, reGrape = findGrapeByRegex(rec, fldWine, debug=debug)
if debug:
print('setWinery:grape found:', grape)
if winery in ignoreGrapeLookup:
if debug:
print(
'setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:'
, winery)
wine = ''
grape = None
nongrape = True
if winery in noGrapeLookup:
if debug:
print('setWinery:noGrapeLookup wine check:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWine], [],
'noGrapeLookup', wineAbbrLookup, debug=debug)
if debug:
print('setWinery:nogrape check:wine:', wine)
if wine == '':
if debug:
print(
'setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True'
)
grape = None
wine = ''
nongrape = True
elif wine:
grape = None
if debug:
print(
                        'setWinery:noGrapeLookup:wine found - clear grape field'
)
if wine is None and winery in liquorLookup:
if debug:
                print('setWinery:liquorLookup:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('setWinery:liquorLookup-match:', liquor)
if not grape and not nongrape and not wine and liquor is None:
if debug:
print('setWinery:did not find grape-skipping record:', rec[
fldWineDescr])
continue
if debug:
print('setWinery:pre-vintage found values for wine/liquor:',
wine, ':grape:', grape)
vintage = findVintage(rec, fldWine, debug=debug)
if debug:
print('setWinery:vintage:', vintage)
if reCase.search(rec[fldWine]):
case = 'case'
for size, reSize in sizeLookup:
if debug:
print('setWinery:sizeLookup:', size)
if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[
fldWine]):
if debug:
print('setWinery:sizeLookup:matched:', reSize)
break
else:
size = None
if debug:
print('setWinery:sizeLookup:None-found')
qual = findQualifier(rec[fldWine], debug=debug)
if debug:
try:
print('setWinery:FinalAttributes:', winery, ':', grape, ':',
wine, ':', liquor, ':', vintage, ':', case, ':', size,
':', qual, ':', rec[fldWine])
except Exception as e:
print('debug error5-continuing:', str(e))
print('fldWine:', fldWine)
if liquor is not None:
if debug:
print(
'setWinery:liquor flag set - no additional data needs to be collected'
)
elif wine is not None:
if debug:
print(
'setWinery:wine is not None - do additional lookups:wine:',
wine)
if wine in wgLookup[winery] and wgLookup[winery][wine]:
if debug:
print('setWinery:lookup winematchset')
winematchset = findAddAttribWgLookup(rec, winery, wine,
fldWine, wineAbbrLookup, None, valueDescr='wine', debug
=debug)
else:
print('setWinery:unable to perform wgLookup on winery:',
winery, ':wine:', wine, ':rec-wine:', rec[fldWine])
if debug:
try:
print('wgLookup[winery]:', wgLookup[winery])
except Exception as e:
print('debug error3-continuing:', str(e))
print('winery:', winery)
if debug:
print('setWinery:winematchset:', winematchset)
elif grape is not None:
if debug:
print('setWinery:grape is not None - do additional lookups:',
grape)
if grape in wgLookup[winery] and wgLookup[winery][grape]:
grapematchset = findAddAttribWgLookup(rec, winery, grape,
fldWine, wineAbbrLookup, defaultorderlist, valueDescr=
'grape', debug=debug)
elif grape in wgLookup[winery]:
if debug:
print(
'setWinery:grape match: matching record set is blank - no action required'
)
else:
print('setWinery:grape NONMATCH:', rec[fldWine])
if debug:
print('setWinery:liquor:', liquor, ':wine:', wine,
':grape:', grape, ':wgLookup[winery]:', wgLookup[
winery])
if debug:
print('setWinery:grapematchset:', grapematchset)
if vintage:
newVintageLookupWine = rec[fldWine]
for matchvalue in winematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:winematchset:wine-name-removal:'
, matchvalue)
for matchvalue in grapematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:grapematchset:wine-name-removal:'
, matchvalue)
if newVintageLookupWine != rec[fldWine]:
if debug:
print('setWinery:2nd-vintage:newVintageLookupWine:',
newVintageLookupWine)
newVintage = findVintage({fldWine: newVintageLookupWine},
fldWine, debug=debug)
if debug:
print('setWinery:2nd-vintage:newVintage:', newVintage)
vintage = newVintage
wineDescr = ''
if winery.startswith('z'):
vintage = None
if debug:
print('setWinery:winery starts with z: clear vintage')
if winematchset and ' '.join(winematchset) in wine:
if debug:
print('setWinery:clearing-winematchset:', winematchset,
':is-in-wine:', wine)
winematchset = []
if grapematchset and ' '.join(grapematchset) in grape:
if not (len(grapematchset) == 1 and len(grapematchset[0]) == 1):
if debug:
print('setWinery:clearing-grapematchset:',
grapematchset, ':is-in-grape:', grape)
grapematchset = []
if grapematchset and size and size in ' '.join(grapematchset):
size = ''
if winematchset and size and size in ' '.join(winematchset):
size = ''
if debug:
print('setWinery:vallist1:', [winery, grape, wine] +
grapematchset + winematchset + [vintage, size, qual, case])
print('setWinery:vallist2:', [winery, grape, wine, *
grapematchset, *winematchset, vintage, size, qual, case])
wdList = []
for val in ([winery, grape, wine] + grapematchset + winematchset +
[vintage, size, qual, case]):
if val:
wdList.append(val)
wineDescr = ' '.join(wdList)
if False:
if debug:
print('setWinery:wdList:', wdList)
if debug:
print('setWinery:wineDescr:', wineDescr)
if debug:
try:
print(':'.join(['setWinery:wineDescrList', wineDescr, rec[
fldWineDescr], str(wineDescr == rec[fldWineDescr]), rec
[fldWine]]))
except Exception as e:
print('debug error6-continuing:', str(e))
print('fldWine:', fldWine)
rec[fldWineDescrNew] = wineDescr
if fldWineDescrMatch:
rec[fldWineDescrMatch] = rec[fldWineDescr] == rec[fldWineDescrNew]
def setDigitFld2Value(wines, fld, value, debug=False):
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
def updateFileOptionDictCheck(optiondict, wines, header, debug=False):
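    """Validate and repair optiondict against the file just read: fall back
    to 'cnt' or 'winedescr' field names when fldWineDescr is missing, back up
    the input file when it would be overwritten by the output, and set
    csvdictkeys to the output column list."""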
if optiondict['fldWineDescr'] not in wines[0]:
if debug:
print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'
, optiondict['fldWineDescr'])
if 'cnt' in wines[0]:
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
print(
'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'
)
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
print('could not find fldWineDescr in wines[0]-aborting:',
optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
            error = wines[0][optiondict['fldWineDescr']]  # force a KeyError to abort
if False and optiondict['fldWineDescr'] == 'winedescr':
if not optiondict['fldWineDescrMatch']:
optiondict['fldWineDescrMatch'] = 'same'
print('setting value fldWineDescrMatch to: same')
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
file_path, base_filename, file_ext = kvutil.filename_split(optiondict
['csvfile_update_in'])
backupfile = kvutil.filename_proper(base_filename + optiondict[
'backupfile_ext'], file_path)
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
if optiondict['fldWineDescrNew'] == 'cnt':
optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',
'wine', 'winesrt']
elif optiondict['fldWineDescrMatch']:
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict
['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]
else:
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:
]
print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[
'csvdictkeys'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import kvutil
import kvcsv
import re
import sys
import shutil
import pprint
pp = pprint.PrettyPrinter(indent=4)
ppFlag = False
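# debug helpers: pp pretty-prints large structures (e.g. the wgLookup dump in
# buildWineryGrapeLookup); ppFlag switches that dump between pprint and print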
optiondictconfig = {'AppVersion': {'value': '1.13', 'description':
'defines the version number for the app'}, 'debug': {'value': False,
'type': 'bool', 'description':
'defines if we are running in debug mode'}, 'verbose': {'value': 1,
'type': 'int', 'description':
'defines the display level for print messages'}, 'setup_check': {
'value': False, 'type': 'bool', 'description':
'defines if we checking out setup'}, 'pprint': {'value': False, 'type':
'bool', 'description':
'defines if we output with pretty print when debugging'},
'csvfile_master_in': {'value': 'wine_xref.csv', 'description':
'defines the name of the master data input file'}, 'csvfile_update_in':
{'value': 'wineref.csv', 'description':
'defines the name of the input file to updated'}, 'csvfile_update_out':
{'value': 'wineref2.csv', 'description':
'defines the name of the updated output file'}, 'fldWine': {'value':
'wine', 'description':
'defines the name of the field that holds the Wine '}, 'fldWineDescr':
{'value': 'winedescr', 'description':
'defines the name of the field holding the wine description'},
'fldWineDescrNew': {'value': 'winedescrnew', 'description':
'defines the name of the NEW field holding the new description '},
'fldWineDescrMatch': {'value': None, 'description':
'defines the name of the NEW field holding the results of comparison existing to new description '
}, 'fldWineMaster': {'value': None, 'description':
'defines the name of the field that holds the Wine when reading the master file '
}, 'fldWineDescrMaster': {'value': None, 'description':
'defines the name of the field holding the wine description when reading the master file'
}, 'backupfile_ext': {'value': '.bak', 'description':
'defines the extension to use to copy the update input file to if we are replacing it with output'
}, 'defaultnew': {'value': None, 'description':
'defines if we should take field fldWineDescrNew and set to a value if not set'
}}
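# regex tables driving the parser: vintageLookup extracts a 4- or 2-digit year
# from several positions in the raw wine string; reCase flags case quantities;
# reQualLookup and sizeLookup normalize qualifiers and bottle sizes. Each
# table is scanned in order and the first matching entry wins.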
vintageLookup = re.compile('\\d\\d\\d\\d\\s+\\d\\d(\\d\\d)'), re.compile(
'^\\d\\d(\\d\\d)'), re.compile('\\s\\d\\d(\\d\\d)$'), re.compile(
'\\s\\d\\d(\\d\\d)\\s'), re.compile('XX\\d\\d(\\d\\d)\\s'), re.compile(
'\\s\\d\\d(\\d\\d)\\/'), re.compile("\\s'?(\\d\\d)'?$|\\s'?(\\d\\d)'?\\s")
reCase = re.compile('12\\s*X\\s*750\\s*ML|\\bcase\\b|12\\/750\\s*ML', re.
IGNORECASE)
reQualLookup = (None, re.compile('\\bWithout\\s+Gift\\b|\\bNo\\s+Gift', re.
IGNORECASE)), ('Gift', re.compile('\\bGift\\b', re.IGNORECASE)), ('VAP',
re.compile('\\bVAP\\b', re.IGNORECASE)), ('VAP', re.compile(
'\\bGlassVAP\\b', re.IGNORECASE)), ('Glass', re.compile('\\bGlass\\b',
re.IGNORECASE)), ('Glass', re.compile('\\bGlasses\\b', re.IGNORECASE)), (
'Etch', re.compile('\\bEtch\\b', re.IGNORECASE)), ('Basket', re.compile
('\\bBasket\\b', re.IGNORECASE))
sizeLookup = ('1.75L', re.compile('\\b1\\.75\\s*Li?|\\b1\\.75$', re.IGNORECASE)
), ('1.5L', re.compile('\\b1\\.5\\s*L?\\b|\\bMagnum\\b', re.IGNORECASE)), (
'375mL', re.compile('Half\\s+Bottle|375ml', re.IGNORECASE)), ('200mL',
re.compile('\\b200\\s*ML|\\(200\\s*ML', re.IGNORECASE)), ('50mL', re.
compile('\\b50\\s*ML|\\(50\\s*ML', re.IGNORECASE)), ('500mL', re.
compile('\\b500\\s*ML|\\(500\\s*ML', re.IGNORECASE)), ('3L', re.compile
('\\b3\\s*Li?', re.IGNORECASE)), ('6L', re.compile('\\b6\\s*Li?', re.
IGNORECASE)), ('9L', re.compile('\\b9\\s*Li?', re.IGNORECASE)), ('1L',
re.compile(
'\\b1L\\b|\\b1\\s+L$|\\b1.0\\s*L\\b|\\b1\\s+Liter\\b|\\bOne\\s+Liter\\b|\\bLITER\\b|\\b1\\s*LTR'
, re.IGNORECASE))
wineryLookup = ('Alban', re.compile('\\bAlban\\b', re.IGNORECASE)), ('Arrowood'
, re.compile('\\bArrowood\\b', re.IGNORECASE)), ('Atalon', re.compile(
'\\bAtalon\\b', re.IGNORECASE)), ('Attune', re.compile('\\bAttune\\b',
re.IGNORECASE)), ('Auteur', re.compile('\\bAuteur\\b', re.IGNORECASE)), (
'Austin Hope', re.compile('\\bAustin\\s+Hope\\b', re.IGNORECASE)), ('Badge'
, re.compile('\\bBadge\\b', re.IGNORECASE)), ('Balletto', re.compile(
'\\bBalletto\\b', re.IGNORECASE)), ('Bell', re.compile(
'\\bBell\\s+Cellar', re.IGNORECASE)), ('BR Cohn', re.compile(
'\\bB\\.?\\s?R\\.?\\s+Cohn\\b', re.IGNORECASE)), ('Bremer', re.compile(
'\\bBremer\\b', re.IGNORECASE)), ('Brewer-Clifton', re.compile(
'\\bBrewer[\\s\\-]Clifton\\b', re.IGNORECASE)), ('BV', re.compile(
'\\bBeaulieu\\s+V|\\bBV\\b', re.IGNORECASE)), ('Belle Glos', re.compile
('\\bBelle\\s+Glos\\b', re.IGNORECASE)), ('Bennett Ln', re.compile(
'\\bBennet+\\sLane\\b', re.IGNORECASE)), ('Benovia', re.compile(
'\\bBenovia\\b', re.IGNORECASE)), ('Beringer', re.compile(
'\\bBeringer\\b', re.IGNORECASE)), ('Blackstone', re.compile(
'\\bBlackstone\\b', re.IGNORECASE)), ('Brancott', re.compile(
'\\bBrancott\\b', re.IGNORECASE)), ('Cade', re.compile('\\bCade\\b', re
.IGNORECASE)), ('Cain Five', re.compile(
'\\bCain\\s+Five\\b|\\bCain\\s-\\sFive\\b|\\bCain\\s5\\b|\\bCainFive\\b',
re.IGNORECASE)), ('Cakebread', re.compile('\\bCakebread\\b', re.IGNORECASE)
), ('Cardinale', re.compile('\\bCardinale\\b', re.IGNORECASE)), ('Caymus',
re.compile('\\bCaymus\\b', re.IGNORECASE)), ('Chappellet', re.compile(
'\\bChappellet\\b', re.IGNORECASE)), ('Chalk Hill', re.compile(
'\\bChalk\\s+Hill\\b', re.IGNORECASE)), ('Clos Du Bois', re.compile(
'\\bClos\\s+Du\\s+Bois\\b', re.IGNORECASE)), ('ClosDuVal', re.compile(
'\\bClos\\s+du\\s+Val\\b', re.IGNORECASE)), ('Colgin', re.compile(
'\\bColgin\\b', re.IGNORECASE)), ('Concha Don Melchor', re.compile(
'\\bConcha\\s.*Don\\s+Melchor\\b|Don\\s+Melchor\\b', re.IGNORECASE)), (
'Continuum', re.compile('\\bContinuum\\b', re.IGNORECASE)), ('Corison',
re.compile('\\bCorison\\b', re.IGNORECASE)), ('Cristal', re.compile(
'Roederer\\s?.*Cristal\\b|\\bCristal\\b.+Brut', re.IGNORECASE)), ('Curran',
re.compile('\\bCurran\\b', re.IGNORECASE)), ('Darioush', re.compile(
'\\bDarioush\\b', re.IGNORECASE)), ('Darioush', re.compile(
'\\bCaravan\\b', re.IGNORECASE)), ('David Arthur', re.compile(
'\\bDavid\\s+Arthur\\b', re.IGNORECASE)), ('David Bruce', re.compile(
'\\bDavid\\s+Bruce\\b', re.IGNORECASE)), ('Davis Family', re.compile(
'\\bDavis\\s+Family\\b', re.IGNORECASE)), ('Del Dotto', re.compile(
'\\bDel\\s+Dotto\\b', re.IGNORECASE)), ('Dominus', re.compile(
'\\bDominus\\b', re.IGNORECASE)), ('Goldeneye', re.compile(
'\\bGoldeneye\\b', re.IGNORECASE)), ('Paraduxx', re.compile(
'\\bParaduxx\\b', re.IGNORECASE)), ('Domaine Carneros', re.compile(
'\\bDomaine\\s+Carneros\\b', re.IGNORECASE)), ('Dominus', re.compile(
    '\\bDominus\\b', re.IGNORECASE)), ('Drappier', re.compile(
'\\bDrappier\\b', re.IGNORECASE)), ('Duckhorn', re.compile(
'\\bDuckhorn\\b', re.IGNORECASE)), ('Dumol', re.compile('\\bDumol\\b',
re.IGNORECASE)), ('Dunn', re.compile('\\bDunn\\b', re.IGNORECASE)), (
'Ehlers', re.compile('\\bEhlers\\b', re.IGNORECASE)), ('Etude', re.
compile('\\bEtude\\b', re.IGNORECASE)), ('Far Niente', re.compile(
'\\bFar Niente\\b', re.IGNORECASE)), ('Flora', re.compile(
'\\bFlora\\s+Springs\\b', re.IGNORECASE)), ('Flowers', re.compile(
'\\bFlowers\\b', re.IGNORECASE)), ('Robert Foley', re.compile(
'\\bRobert\\s+\\bFoley\\b', re.IGNORECASE)), ('Foley', re.compile(
'\\bFoley\\b', re.IGNORECASE)), ('Foxen', re.compile('\\bFoxen\\b', re.
IGNORECASE)), ('Franciscan', re.compile('\\bFranciscan\\b', re.IGNORECASE)
), ('Frank Family', re.compile('\\bFrank Family\\b', re.IGNORECASE)), (
'Gary Farrell', re.compile('\\bGary\\s+Farrel+\\b', re.IGNORECASE)), (
'Ghost Block', re.compile('\\bGhost\\s+Block\\b', re.IGNORECASE)), (
'Grgich', re.compile('\\bGrgich\\b', re.IGNORECASE)), ('Groth', re.
compile('\\bGroth\\b', re.IGNORECASE)), ('Gundlach', re.compile(
'\\bGundlach\\b', re.IGNORECASE)), ('Hansel', re.compile('\\bHansel\\b',
re.IGNORECASE)), ('Hanzell', re.compile('\\bHanzell\\b', re.IGNORECASE)), (
'Hess', re.compile('\\bHess\\b', re.IGNORECASE)), ('Hewitt', re.compile
('\\bHewitt\\b', re.IGNORECASE)), ('Hobbs', re.compile(
'\\bHobbs\\b|\\bcrossbarn\\b', re.IGNORECASE)), ('Hundred Acre', re.
compile('\\bHundred\\s+Acre\\b', re.IGNORECASE)), ('Jordan', re.compile
('\\bJordan\\b', re.IGNORECASE)), ('Justin', re.compile('\\bJustin\\b',
re.IGNORECASE)), ('Kim Crawford', re.compile('\\bKim\\s+Crawford\\b',
re.IGNORECASE)), ('Kistler', re.compile('\\bKistler\\b', re.IGNORECASE)), (
'Kosta', re.compile('\\bKosta\\s+Browne?\\b', re.IGNORECASE)), ('Krug',
re.compile('\\bKrug\\b', re.IGNORECASE)), ('Kunde', re.compile(
'\\bKunde\\b', re.IGNORECASE)), ('LaCrema', re.compile(
'\\bLa\\s?Crema\\b', re.IGNORECASE)), ('Lewis', re.compile(
'\\bLewis\\b', re.IGNORECASE)), ('Lokoya', re.compile('\\bLokoya\\b',
re.IGNORECASE)), ('Meiomi', re.compile('\\bMeiomi\\b', re.IGNORECASE)), (
'Melville', re.compile('\\bMelville\\b', re.IGNORECASE)), ('Momento Mori',
re.compile('\\bMomento\\s+Mori\\b', re.IGNORECASE)), ('Mondavi', re.
compile('\\bMondavi\\b', re.IGNORECASE)), ('Montelena', re.compile(
'\\bMontelena\\b', re.IGNORECASE)), ('Mt Veeder', re.compile(
'^Mount\\s+Veeder\\b|^Mt\\.? Veeder\\b|\\d+\\s+M[^t]*t\\s+Veeder\\b',
re.IGNORECASE)), ('Newton', re.compile('\\bNewton\\b', re.IGNORECASE)), (
'Nickel', re.compile('\\bNickel\\b', re.IGNORECASE)), ('Opus One', re.
compile('\\bOpus\\s+One\\b', re.IGNORECASE)), ('P Togni', re.compile(
'\\bTogni\\b', re.IGNORECASE)), ('Pahlmeyer Jayson', re.compile(
'\\bJayson\\b', re.IGNORECASE)), ('Pahlmeyer', re.compile(
'\\bPahlmeyer\\b(?!\\s*Jay)', re.IGNORECASE)), ('Papillon', re.compile(
'\\bPapillon\\b', re.IGNORECASE)), ('Patz', re.compile('\\bPatz\\b', re
.IGNORECASE)), ('Phelps', re.compile('\\bPhelps\\b', re.IGNORECASE)), (
'Plumpjack', re.compile('\\bPlumpjack\\b', re.IGNORECASE)), ('Pride',
re.compile('\\bPride\\b', re.IGNORECASE)), ('Prisoner', re.compile(
'\\bPrisoner\\b', re.IGNORECASE)), ('Provenance', re.compile(
'\\bProvenance\\b', re.IGNORECASE)), ('R Sinskey', re.compile(
'\\bSinskey\\b', re.IGNORECASE)), ('Ramey', re.compile('\\bRamey\\b',
re.IGNORECASE)), ('Revana', re.compile('\\bRevana\\b', re.IGNORECASE)), (
'Raptor', re.compile('\\bRaptor\\s+Ridge\\b', re.IGNORECASE)), ('Revana',
re.compile('\\bRevana\\b', re.IGNORECASE)), ('Ridge', re.compile(
'\\bRidge\\b', re.IGNORECASE)), ('Robert Foley', re.compile(
'\\bRobert\\s+Foley\\b', re.IGNORECASE)), ('Rombauer', re.compile(
'\\bRombauer\\b', re.IGNORECASE)), ('Rudd', re.compile('\\bRudd\\b', re
.IGNORECASE)), ('Scarecrow', re.compile('\\bScarecrow\\b', re.IGNORECASE)
), ('Sea Smoke', re.compile('\\bSea\\s+Smoke\\b', re.IGNORECASE)), (
'Seghesio', re.compile('\\bSeghesio\\b', re.IGNORECASE)), ('Shafer', re
.compile('\\bShafer\\b', re.IGNORECASE)), ('Sherwin', re.compile(
'\\bSherwin\\b', re.IGNORECASE)), ('Silver Oak', re.compile(
'\\bSilver\\s+Oak\\b', re.IGNORECASE)), ('Silverado', re.compile(
'\\bSilverado\\b', re.IGNORECASE)), ('Simi', re.compile('\\bSimi\\b',
re.IGNORECASE)), ('Sonoma Cutrer', re.compile('\\bCutrer\\b', re.
IGNORECASE)), ('Spottswoode', re.compile('\\bSpottswoode\\b', re.
IGNORECASE)), ('Stag Leap', re.compile('\\bStag.*\\sLeap\\b', re.
IGNORECASE)), ('Sullivan', re.compile('\\bSullivan\\b', re.IGNORECASE)), (
'Summerland', re.compile('\\bSummerland\\b', re.IGNORECASE)), ('Summers',
re.compile('\\bSummers\\b', re.IGNORECASE)), ('Tantara', re.compile(
'\\bTantara\\b', re.IGNORECASE)), ('Turnbull', re.compile(
'\\bTurnbull\\b', re.IGNORECASE)), ('Veuve', re.compile('\\bVeuve\\b',
re.IGNORECASE)), ('Viader', re.compile('\\bViader\\b', re.IGNORECASE)), (
'Waterstone', re.compile('\\bWaterstone\\b', re.IGNORECASE)), ('Whitehall',
re.compile('\\bWhitehall\\b', re.IGNORECASE)), ('Wm Selyem', re.compile
('\\bWilliams\\s*\\-?Selyem\\b', re.IGNORECASE)), ('ZD', re.compile(
'\\bZD\\b', re.IGNORECASE)), ('Zaca', re.compile('\\bZaca\\b', re.
IGNORECASE)), ('zBourbon Woodford Res', re.compile(
'\\bWoodford\\s+Reserve\\b', re.IGNORECASE)), ('zBourbon Woodford Res',
re.compile('\\bWoodford\\s+Rsv\\b', re.IGNORECASE)), ('zCognac Courvoisier'
, re.compile('\\bCourvoisier\\b', re.IGNORECASE)), ('zCognac Hennessy',
re.compile('\\bHennesse?y\\b', re.IGNORECASE)), ('zCognac Remy', re.
compile('\\bRemy\\s+Martin\\b|\\bRemy\\s+Louis', re.IGNORECASE)), (
'zCointreau', re.compile('\\bCointreau\\b', re.IGNORECASE)), (
'zGin Hendrick', re.compile('\\bHendrick', re.IGNORECASE)), (
'zGin Tanqueray', re.compile('\\bTanqueray\\b', re.IGNORECASE)), (
'zRum Mt Gay', re.compile('\\bMount\\s+Gay\\b|\\bMt\\s+Gay', re.IGNORECASE)
), ('zRum Ron Zacapa', re.compile('\\bRon\\s+Zacapa\\b', re.IGNORECASE)), (
'zRye Hayden', re.compile('\\bBasil\\s+Hayden\\b', re.IGNORECASE)), (
'zSambuca', re.compile('\\bSambuca\\b', re.IGNORECASE)), (
'zScotch Glenmorangie', re.compile('\\bGlenmorangie\\b', re.IGNORECASE)), (
'zScotch Hibiki Harmony', re.compile('\\bHibiki\\s.*Harmony\\b', re.
IGNORECASE)), ('zScotch Hibiki', re.compile('\\bHibiki\\b(?!\\s*Har)',
re.IGNORECASE)), ('zScotch Macallan', re.compile('\\bMacallan\\b', re.
IGNORECASE)), ('zTeq Campo Azul', re.compile('\\bCampo\\s+Azul\\b', re.
IGNORECASE)), ('zTeq Casamigos', re.compile('\\bCasamigos\\b', re.
IGNORECASE)), ('zTeq Casino Azul', re.compile('\\bCasino\\s+Azul\\b',
re.IGNORECASE)), ('zTeq Clase Azul', re.compile('\\bClase\\s+Azul\\b',
re.IGNORECASE)), ('zTeq Cuervo', re.compile(
'\\bJose\\s+Cuervo\\b|^Cuervo\\b', re.IGNORECASE)), ('zTeq Don Julio',
re.compile('\\bDon\\s+Julio\\b', re.IGNORECASE)), ('zTeq Dos Artes', re
.compile('\\bDos\\s+Artes\\b|^Cuervo\\b', re.IGNORECASE)), (
'zTeq Gran Cava', re.compile('\\bGran\\s+Cava\\b', re.IGNORECASE)), (
'zTeq Herradura', re.compile('\\bHerradura\\b', re.IGNORECASE)), (
'zTeq Loma Azul', re.compile('\\bLoma\\s+Azul\\b', re.IGNORECASE)), (
'zTeq Padre Azul', re.compile('\\bPadre\\s+Azul\\b', re.IGNORECASE)), (
'zTeq Partida', re.compile('\\bPartida\\b', re.IGNORECASE)), ('zTeq Patron'
, re.compile('\\bPatron\\b', re.IGNORECASE)), ('zTripleSec Gr Marnier',
re.compile('\\bGrand\\s+Marnier\\b', re.IGNORECASE)), (
'zTripleSec Dekuyper', re.compile('\\bDekuyper\\b', re.IGNORECASE)), (
'zTripleSec Hiram', re.compile('\\bHiram\\b', re.IGNORECASE)), (
'zVodka Absolut', re.compile('\\bAbsolut\\b', re.IGNORECASE)), (
'zVodka Skyy', re.compile('\\bSkyy\\b', re.IGNORECASE)), ('zVodka Tito',
re.compile('\\bTito', re.IGNORECASE)), ('zWhiskey Balvenie', re.compile
('\\bBalvenie\\b', re.IGNORECASE)), ('zWhiskey J Walker', re.compile(
'\\bJohn+ie\\s+Walker\\b', re.IGNORECASE))
grapeLookup = ('Cab Franc', re.compile(
'\\bCabernet\\s+Franc|\\bCab\\s+Franc', re.IGNORECASE)), ('Cab', re.
compile('\\bCabernet\\b|\\sCS\\s|\\sCS$|\\bCab\\b', re.IGNORECASE)), (
'Claret', re.compile('\\bClaret\\b', re.IGNORECASE)), ('Rose Pinot', re
.compile('\\bRose\\b.*\\bPinot\\b|\\bPinot\\b.*\\bRose\\b', re.IGNORECASE)
), ('Pinot', re.compile('\\bPinot\\b|\\bPN\\b|\\bP\\s+Noir\\b', re.
IGNORECASE)), ('Merlot', re.compile('\\bMerlot\\b|\\bME\\b', re.IGNORECASE)
), ('Sauv Blanc', re.compile('\\bSauvignon\\s+Blanc\\b|\\bSB\\b', re.
IGNORECASE)), ('Sauv Blanc', re.compile(
'\\bSauvignon\\/Fume\\s+Blanc\\b', re.IGNORECASE)), ('Meritage', re.
compile('\\bMeritage\\b', re.IGNORECASE)), ('Fume', re.compile(
'\\bFume\\b|\\bFumé', re.IGNORECASE)), ('Champagne', re.compile(
'\\bChampagne\\b', re.IGNORECASE)), ('Chard', re.compile(
'\\bChar+d|\\bCH\\b', re.IGNORECASE)), ('Shiraz', re.compile(
'\\bShiraz\\b', re.IGNORECASE)), ('Syrah', re.compile(
'\\bSyrah\\b|\\bSY\\b', re.IGNORECASE)), ('Zin', re.compile(
'\\bZinfandel\\b|\\bZIN\\b|\\bZN\\b', re.IGNORECASE)), ('Rose', re.
compile('\\bRose\\b|\\bRosé', re.IGNORECASE)), ('Sangiovese', re.
    compile('\\bSangiovese\\b', re.IGNORECASE)), ('Gewurzt', re.compile(
'\\bGew.rztraminer\\b|\\bGewürzt', re.IGNORECASE)), ('Malbec', re.
compile('\\bMalbec\\b', re.IGNORECASE)), ('Viognier', re.compile(
'\\bViognier\\b', re.IGNORECASE)), ('Roussanne', re.compile(
'\\bRoussanne\\b', re.IGNORECASE)), ('Charbono', re.compile(
'\\bCharbono\\b', re.IGNORECASE)), ('PSirah', re.compile(
'\\bPetite Sirah\\b', re.IGNORECASE)), ('Cuvee', re.compile(
'\\bCuvee\\b', re.IGNORECASE)), ('Red', re.compile(
'\\bRed\\b|\\bBordeaux\\s+Blend\\b', re.IGNORECASE)), ('Syrah-Cab', re.
compile('\\bSyrcab\\b|\\bsyrah[-\\s\\/]+cab', re.IGNORECASE)), ('Grenache',
re.compile('\\bGrenache\\b', re.IGNORECASE)), ('Tempranillo', re.
compile('\\bTempranillo\\b', re.IGNORECASE))
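# ignoreGrapeLookup: wineries whose records carry no grape varietal; the
# optional token lists (e.g. '1880', 'Rose') are values to keep during
# attribute cleanup instead of being stripped as vintages. noGrapeLookup
# (below) maps wineries to named wines that stand in for a grape.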
ignoreGrapeLookup = {'Cristal': ['Rose', None], 'Domaine Carneros': ['Brut',
None], 'Dominus': [None], 'Papillon': None, 'Paraduxx': None, 'Veuve':
None, 'zCointreau': None, 'zGin Hendrick': None, 'zGin Tanqueray': [
'Ten', None], 'zTripleSec Gr Marnier': ['1880', '100th', 'Cent', 'Quin',
None], 'zTripleSec Dekuyper': None, 'zTripleSec Hiram': None,
'zVodka Skyy': ['Citrus', None], 'zVodka Tito': None}
noGrapeLookup = {'Ehlers': ['120-80'], 'Alban': ['Pandora'], 'BV': [
'Tapestry', 'Latour'], 'Bennett Ln': ['Maximus'], 'Bremer': [
'Austintatious'], 'Cain Five': None, 'Colgin': ['Cariad', 'IX'],
'Concha Don Melchor': None, 'Continuum': None, 'Darioush': ['Duel',
'Darius'], 'Duckhorn': ['Discussion'], 'Far Niente': ['Dolce'], 'Flora':
['Trilogy'], 'Franciscan': ['Magnificat'], 'Grgich': ['Violetta'],
'Gundlach': ['Vintage Reserve'], 'Justin': ['Isosceles'], 'Krug': [
'Generations'], 'Mondavi': ['Maestro'], 'Newton': ['Puzzle'],
'Opus One': None, 'Phelps': ['Insignia'], 'Prisoner': ['Cuttings',
'Derange', 'Saldo', 'Blindfold'], 'Ridge': ['Monte Bello'],
'Robert Foley': ['Griffin'], 'Sullivan': ['Coeur de Vigne'], 'Zaca': [
'ZThree', 'ZCuvee'], 'zCognac Courvoisier': ['Napolean', 'VS', 'VSOP',
'XO'], 'zCognac Hennessy': ['Paradis', 'Richard', 'VS', 'VSOP', 'XO',
'Master'], 'zCognac Remy': ['1738', 'Louis XIII', 'VSOP', 'XO', 'VS'],
'zRum Ron Zacapa': ['23', 'Negra', 'XO'], 'zRye Hayden': ['Dark',
'Caribbean'], 'zScotch Hibiki Harmony': None, 'zTeq Campo Azul': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Casamigos': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Casino Azul': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Silver'],
'zTeq Clase Azul': ['Ultra', 'Extra Anejo', 'Anejo', 'Blanco',
'Reposado', 'Mezcal', 'Plata', 'Platino'], 'zTeq Dos Artes': [
'Extra Anejo'], 'zTeq Gran Cava': ['Extra Anejo'], 'zTeq Loma Azul': [
'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Partida': [
'Blanco', 'Elegante'], 'zVodka Absolut': ['Citron', 'Mandarin',
'Mandrin', 'Mango', 'Ruby', 'Vanilia', 'Raspberri', 'Grapevine', None],
'zWhiskey J Walker': ['Double Black', 'Black', 'Blue', 'Gold', 'Green',
'Platinum', 'Red', 'Swing', 'White', '18', '21']}
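# liquorLookup: winery -> ordered (label, regex) pairs consumed by
# findLiquor(); the first regex that hits wins, so specific labels
# (e.g. '12 Double Gold') are listed before generic ones ('12')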
liquorLookup = {'zRum Mt Gay': [('1703 Mst', re.compile('\\b1703\\b', re.
IGNORECASE)), ('BB', re.compile('\\bBlack Barrel\\b', re.IGNORECASE)),
('Eclipse Silver', re.compile('\\bEclipse\\s+Silver\\b', re.IGNORECASE)
), ('Eclipse', re.compile('\\bEclipse\\b', re.IGNORECASE)), ('Old Peat',
re.compile('\\bOld Peat', re.IGNORECASE)), ('Old Pot', re.compile(
'\\bPot\\s+Still\\b', re.IGNORECASE)), ('Old', re.compile('\\bOld\\b',
re.IGNORECASE)), ('Silver', re.compile('\\bSilver\\b', re.IGNORECASE)),
('XO Peat', re.compile('\\bXO\\b', re.IGNORECASE))],
'zScotch Glenmorangie': [('10', re.compile('\\b10(YR)?\\b', re.
IGNORECASE)), ('14 Port', re.compile(
'14.+\\bQuinta\\b|14.+\\bPort\\b|\\bQuinta\\b.+14|\\bPort\\b.+14', re.
IGNORECASE)), ('12 Bacalta', re.compile('\\bBacalta\\b', re.IGNORECASE)
), ('12 Burgundy', re.compile('\\bBurgundy\\b', re.IGNORECASE)), (
'12 Nectar', re.compile('\\bNectar\\b', re.IGNORECASE)), ('12 Port', re
.compile('\\bQuinta\\b|\\bPort\\b', re.IGNORECASE)), ('12 Sherry', re.
compile('\\bLa\\s?Santa\\b|\\bSherry\\b', re.IGNORECASE)), ('12 Signet',
re.compile('\\bSignet\\b', re.IGNORECASE)), ('15 Cadboll', re.compile(
'\\bCadboll', re.IGNORECASE)), ('15', re.compile('\\b15(YR)?\\b', re.
IGNORECASE)), ('18', re.compile('\\b18(YR)?\\b|\\b18YEAR\\b', re.
IGNORECASE)), ('25 Astar', re.compile('\\bAstar\\b', re.IGNORECASE)), (
'25', re.compile('\\b25(YR)?\\b', re.IGNORECASE)), ('Companta', re.
compile('\\bCompanta\\b', re.IGNORECASE)), ('Finealta', re.compile(
'\\bFinealta\\b', re.IGNORECASE)), ('Milsean', re.compile(
'\\bMilsean\\b', re.IGNORECASE)), ('Sonnalta', re.compile(
'\\bSonnalta\\b', re.IGNORECASE))], 'zScotch Macallan': [('10 Fine', re
.compile('\\bFine.*\\b10\\b|\\b10.*Fine')), ('10', re.compile(
'\\b10\\b')), ('12 Double Gold', re.compile(
'\\bDbl\\b.*Gold|\\bDouble\\b.*Gold', re.IGNORECASE)), ('12 Double', re
.compile('\\bDouble\\s.*12(YR)?\\b', re.IGNORECASE)), ('12 Double', re.
compile('\\b12\\s.*Double\\b', re.IGNORECASE)), ('12 Double', re.
compile('\\bDbl\\b|\\bDouble\\b', re.IGNORECASE)), ('12 Edition 1', re.
compile('\\bEdition\\s.*1\\b', re.IGNORECASE)), ('12 Edition 2', re.
compile('\\bEdition\\s.*2\\b', re.IGNORECASE)), ('12 Edition 3', re.
compile('\\bEdition\\s.*3\\b', re.IGNORECASE)), ('12 Edition 4', re.
compile('\\bEdition\\s.*4\\b', re.IGNORECASE)), ('12 Sherry', re.
compile('\\b12\\s.*Sherry\\b|\\bSherry\\b\\s.*\\b12', re.IGNORECASE)),
('12 Triple', re.compile('\\b12(YR)?\\s.*Triple\\b', re.IGNORECASE)), (
'12 Triple', re.compile('\\bTriple\\s.*12\\b', re.IGNORECASE)), ('12',
re.compile('\\b12(YR)?\\b', re.IGNORECASE)), ('15 Triple', re.compile(
'\\b15(YR)?\\s.*Triple\\b|Triple.+\\b15(YR)?\\b', re.IGNORECASE)), (
'15 Fine', re.compile('\\b15(YR)?\\b.*\\bFine\\b', re.IGNORECASE)), (
'15', re.compile('\\b15(YR)?\\b', re.IGNORECASE)), ('17 Sherry', re.
compile('\\b17(YR)?\\s.*Sherry\\b', re.IGNORECASE)), ('17 Fine', re.
compile('\\b17(YR)?\\b.*\\bFine\\b', re.IGNORECASE)), ('17', re.compile
('\\b17(YR)?\\b', re.IGNORECASE)), ('18 Sherry', re.compile(
'\\b18(YR)?\\s.*Sherry\\b|Sherry\\b.*18', re.IGNORECASE)), ('18 Triple',
re.compile('\\b18(YR)?\\s.*Triple\\b|Triple.+\\b18(YR)?\\b', re.
IGNORECASE)), ('18 Fine', re.compile('\\b18(YR)?\\b.*\\bFine\\b', re.
IGNORECASE)), ('18 Gran', re.compile('Gran\\b.*\\b18', re.IGNORECASE)),
('18', re.compile('\\b18(YR)?\\b', re.IGNORECASE)), ('21 Fine', re.
compile('\\b21.*Fine\\b', re.IGNORECASE)), ('21', re.compile(
'\\b21(YR)?\\b', re.IGNORECASE)), ('25 Sherry', re.compile(
'\\b25\\s.*Sherry\\b', re.IGNORECASE)), ('25', re.compile(
'\\b25(YR)?\\b')), ('30 Sherry', re.compile('\\b30\\s.*Sherry', re.
IGNORECASE)), ('30 Triple', re.compile(
'\\b30(YR)?\\s.*Triple\\b|Triple.+\\b30(YR)?\\b', re.IGNORECASE)), (
'30 Fine', re.compile('\\b30(YR)?\\b.*\\bFine\\b|Fine.*30', re.
IGNORECASE)), ('30', re.compile('\\b30(YR)?\\b')), ('Rare', re.compile(
'\\bRare\\b', re.IGNORECASE))], 'zTeq Cuervo': [('Especial Gold', re.
compile('\\bEspecial\\b.*Gold\\b|Gold.*Especial', re.IGNORECASE)), (
'Especial Blue', re.compile('\\bEspecial\\b.*Blue\\b', re.IGNORECASE)),
('Especial', re.compile('\\bEspecial\\b', re.IGNORECASE)), (
'Familia Platino', re.compile('\\bPlatino\\b', re.IGNORECASE)), (
'Familia Anejo', re.compile('\\bFamilia\\b|\\bReserva\\b', re.
IGNORECASE)), ('Gold', re.compile('\\bGold\\b', re.IGNORECASE)), (
'Reposado Lagavulin', re.compile('\\bReposado.*Lagavulin', re.
IGNORECASE)), ('Tradicional Anejo', re.compile(
'Tradicional.*Anejo|Anejo.*Tradicional', re.IGNORECASE)), (
'Tradicional Reposado', re.compile(
'Tradicional.*Reposado|Reposado.*Tradicional', re.IGNORECASE)), (
'Tradicional Silver', re.compile('\\bTradicional\\b', re.IGNORECASE)),
('Tradicional Silver', re.compile('\\bTraditional\\b', re.IGNORECASE)),
('Reposado', re.compile('\\bReposado\\b', re.IGNORECASE)), ('Silver',
re.compile('\\bSilver\\b', re.IGNORECASE))], 'zTeq Don Julio': [('1942',
re.compile('\\b1942\\b', re.IGNORECASE)), ('Real', re.compile(
'\\bReal\\b', re.IGNORECASE)), ('Anejo Claro 70th', re.compile(
'\\b70th\\b', re.IGNORECASE)), ('Anejo Claro', re.compile(
'\\bAnejo\\b\\s*Claro\\b', re.IGNORECASE)), ('Anejo', re.compile(
'\\bAnejo\\b', re.IGNORECASE)), ('Blanco', re.compile('\\bBlanco\\b',
re.IGNORECASE)), ('Reposado Lagavulin', re.compile(
'\\bRepo.+Lagvulin\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(
'\\bReposado.+Double\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(
'\\bReposado.+Dbl\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(
'\\bDouble.+Reposado\\b', re.IGNORECASE)), ('Reposado Private', re.
compile('\\bReposado.+Private\\b', re.IGNORECASE)), ('Reposado', re.
compile('\\bReposado\\b', re.IGNORECASE)), ('Silver', re.compile(
'\\bSilver\\b', re.IGNORECASE))], 'zTeq Herradura': [('Ultra', re.
compile('\\bUltra\\b', re.IGNORECASE)), ('Suprema', re.compile(
'\\bSuprema\\b', re.IGNORECASE)), ('Anejo', re.compile('\\bAnejo\\b',
re.IGNORECASE)), ('Blanco', re.compile('\\bBlanco\\b', re.IGNORECASE)),
('Reposado Gold', re.compile(
'\\bReposado\\s+Gold\\b|\\bGold\\s+Reposado\\b', re.IGNORECASE)), (
'Reposado Scotch', re.compile(
'\\bReposado.+Scotch\\b|\\bScotch.+Reposado\\b', re.IGNORECASE)), (
'Reposado Port', re.compile('\\bPort.+Reposado\\b|\\bReposado.+Port\\b',
re.IGNORECASE)), ('Reposado', re.compile('\\bReposado\\b', re.
IGNORECASE)), ('Silver', re.compile('\\bSilver\\b', re.IGNORECASE))],
'zTeq Patron': [('Gran Piedra', re.compile('\\bPiedra\\b', re.
IGNORECASE)), ('DELETE Roca DELETE', re.compile('\\bRoca\\b', re.
IGNORECASE)), ('Anejo Extra Lalique', re.compile('\\bLalique\\b', re.
IGNORECASE)), ('Anejo Extra 7yr', re.compile(
'\\b7YR\\b|\\b7 anos\\b|\\b7 year\\b', re.IGNORECASE)), (
'Anejo Extra 5yr', re.compile('\\b5YR\\b|\\b5 anos\\b|\\b5 year\\b', re
.IGNORECASE)), ('Anejo Extra 10yr', re.compile(
'\\b10\\b.+\\bExtra\\b|\\bExtra\\b.+10', re.IGNORECASE)), (
'Anejo Extra', re.compile('\\bExtra\\s+Anejo\\b', re.IGNORECASE)), (
'Gran Anejo', re.compile('\\bGran\\s+Anejo\\b', re.IGNORECASE)), (
'Gran Anejo', re.compile('\\bBurdeos\\b', re.IGNORECASE)), (
'Gran Smoky', re.compile('\\bGran\\s+.*Smoky\\b', re.IGNORECASE)), (
'Anejo', re.compile('\\bAnejo\\b', re.IGNORECASE)), ('Gran Platinum',
re.compile('\\bPlatinum\\b', re.IGNORECASE)), ('Reposado', re.compile(
'\\bReposado\\b', re.IGNORECASE)), ('Silver LTD', re.compile(
'\\bSilver.*Limited\\b|\\bLimited.*Silver\\b', re.IGNORECASE)), (
'Silver Estate', re.compile('\\bEstate.*Silver\\b|\\bSilver.*Estate\\b',
re.IGNORECASE)), ('Silver', re.compile('\\bSilver\\b', re.IGNORECASE)),
('Blanco', re.compile('\\bBlanco\\b', re.IGNORECASE))],
'zTeq Padre Azul': [('Blanco', re.compile('\\bsilver\\b', re.IGNORECASE
))], 'zWhiskey Balvenie': [('12 Double', re.compile(
'\\bDouble.*12(YR)?\\b', re.IGNORECASE)), ('12 Double', re.compile(
'\\b12(YR)?\\s.*Double', re.IGNORECASE)), ('12 First', re.compile(
'\\b12(YR)?\\s.*First', re.IGNORECASE)), ('12 USA', re.compile(
'\\b12.*American|American.*12', re.IGNORECASE)), ('12 Toast', re.
compile('\\b12(YR)?\\s.*Toast', re.IGNORECASE)), ('12', re.compile(
'\\b12(YR)?\\b', re.IGNORECASE)), ('14 Carib', re.compile(
'\\b14(YR)?\\s.*Carib', re.IGNORECASE)), ('14 Carib', re.compile(
'\\b14(YR)?\\s.*CB\\s+Cask', re.IGNORECASE)), ('14 Carib', re.compile(
'\\bCarr?ib', re.IGNORECASE)), ('14 Peat', re.compile(
'\\b14(YR)?\\s.*Peat', re.IGNORECASE)), ('15 Sherry', re.compile(
'\\b15(YR)?\\s.*Sherry\\b', re.IGNORECASE)), ('15 Sherry', re.compile(
'\\bSherry\\s+.*15(YR)?\\b', re.IGNORECASE)), ('15', re.compile(
'\\b15(YR)?\\b', re.IGNORECASE)), ('16 Triple', re.compile(
'\\b16(YR)?\\s.*Triple\\b', re.IGNORECASE)), ('17 Sherry Double', re.
compile('\\b17(YR)?\\s.*Sherry\\s+Doub', re.IGNORECASE)), ('17 Sherry',
re.compile('\\b17(YR)?\\s.*Sherry', re.IGNORECASE)), ('17 Double', re.
compile('\\b17(YR)?\\s.*Double', re.IGNORECASE)), ('17 Double', re.
compile('\\bDouble.*17(YR)?\\b', re.IGNORECASE)), ('17 Peat', re.
compile('\\b17(YR)?\\s.*Peat', re.IGNORECASE)), ('17 Peat', re.compile(
'\\bPeat.*17(YR)?\\b', re.IGNORECASE)), ('17', re.compile(
'\\b17(YR)?\\b', re.IGNORECASE)), ('21 Port', re.compile('\\b21.*Port',
re.IGNORECASE)), ('21 Port', re.compile('\\bPort.*21\\b', re.IGNORECASE
)), ('21', re.compile('21', re.IGNORECASE)), ('25', re.compile(
'\\b25(YR)?\\b', re.IGNORECASE)), ('30', re.compile('\\b30(YR)?\\b', re
.IGNORECASE)), ('40', re.compile('\\b40(YR)?\\b', re.IGNORECASE))],
'zBourbon Woodford Res': [('Dbl', re.compile('\\bDouble\\b', re.
IGNORECASE)), ('Derby', re.compile('\\bDerby\\b', re.IGNORECASE)), (
'Rye Choc', re.compile('\\bChocolate.*Rye\\b', re.IGNORECASE)), ('Rye',
re.compile('\\bRye\\b', re.IGNORECASE)), ('Brandy', re.compile(
'\\bBrandy\\b', re.IGNORECASE)), ('Batch', re.compile('\\bBatch\\b', re
.IGNORECASE)), ('Barrel', re.compile('\\bBarrel\\b', re.IGNORECASE)), (
'Master', re.compile('\\bMasters?\\b', re.IGNORECASE)), ('Malt', re.
compile('\\bMalt\\b', re.IGNORECASE)), ('Maple', re.compile(
'\\bMaple\\b', re.IGNORECASE)), ('Wheat', re.compile('\\bWheat\\b', re.
IGNORECASE)), ('', re.compile('\\bWoodford\\b', re.IGNORECASE))],
'zSambuca': [('Romana Black', re.compile(
'\\bRomana.*\\bBlack\\b|\\bBlack\\s+Romana\\b', re.IGNORECASE)), (
'Romana', re.compile('\\bRomana\\b', re.IGNORECASE)), ('Di Amore', re.
compile('\\bdi Amore\\b', re.IGNORECASE))], 'zScotch Hibiki': [('12',
re.compile('\\b12\\s*YE?A?R\\b', re.IGNORECASE)), ('17 Limited', re.
compile('\\b17\\s*YE?A?R\\b.+Limited', re.IGNORECASE)), ('17', re.
compile('\\b17\\s*YE?A?R\\b', re.IGNORECASE)), ('21 Limited', re.
compile('\\b21\\s*YE?A?R\\b.+Limited', re.IGNORECASE)), ('21', re.
compile('\\b21\\s*YE?A?R\\b', re.IGNORECASE)), ('30', re.compile(
'\\b30\\s*YE?A?R\\b', re.IGNORECASE))]}
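# wineAbbrLookup: short token used in winedescr -> regex for its longer
# spelling in the raw wine string; used as a fallback by wineLookupByName
# and findAddAttribWgLookup when the literal token does not match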
wineAbbrLookup = {'120-80': '\\bOne\\s+Twenty\\s+Over\\s+Eighty\\b',
'3Amigos': '\\bThree\\s+Amigos\\b', '3Palms': '\\bThree\\s+Palms\\b',
'3Sister': '\\bThree\\s+Sisters?\\b', '4Barrell':
'\\b4[\\-\\s]Barrels?\\b', 'Alex': '\\bAlexander\\b', 'And':
'\\bAnderson\\b', 'Car': '\\bCarneros\\b', 'Carries': '\\bCarrie', 'CC':
'\\bC\\.?C\\.?\\s+Ranch\\b', 'Clone4': '\\bClone\\s+4\\b', 'Clone6':
'\\bClone\\s+6\\b', 'Crossbarn': '\\bCross\\s+Barn\\b', 'Donna':
'\\bDonna', 'Est': '\\bEstate\\b', 'Estate': '\\bEst\\b', 'Gap':
'\\bGap|\\s%27Gap', 'Gary': '\\bGary', 'Julia': '\\bJulia', 'Knights':
'\\bKnight', 'KistlerVnyd': '\\bKistler (Vineyard|VYD|EST)\\b', 'LP':
'\\bLes Pierres\\b', 'Lyn': '\\bLyndenhur?st\\b', 'Mont':
'\\bMonterey\\b', 'Mt': '\\bMount\\b|\\bMt\\.\\b', 'Napa/Son':
'\\bNapa.*Son', 'Oak': '\\bOakville\\b', 'One-Pt-5':
'\\bOne\\s+Point\\s+Five\\b', 'Pomm': '\\bPommeraie\\b', 'Priv':
'\\bPrivate\\b', 'RR': '\\bRussian\\s+Rivers?\\b|RRV', 'RRR':
'\\bRussian\\s+Rivers?\\b|RRV', 'Res':
'\\bReserve\\b|\\bRsv\\b|\\bResrv\\b|\\bReserv\\b|\\bReserve$', 'Rose':
'\\bRosé|\\bROS&EACUTE;|\\bRos%E9', 'Ruth': '\\bRutherford\\b',
'Sandy': '\\bSandy', 'Samanthas': '\\bSamantha', 'SC':
'\\bSanta\\s+Cruz\\b', 'SLD': '\\bStag.*Leap\\b', 'SLH':
'\\bSanta\\s+Lucia\\b', 'SMV': '\\bSanta\\s+Maria|\\bS\\s+Maria', 'SRH':
'\\bSTA\\.?|\\bSANTA\\s+Rita\\b|\\bSTA\\sRITA\\sHILLS|\\bS\\s+RITA\\b',
    'SS': '\\bSpecial\\s+Selection\\b', 'Stage': '\\bStagecoach\\b',
'Son': '\\bSonoma\\b', 'SYV': '\\bSanta\\s+Ynez\\s+Valley\\b', 'TD9':
'\\bTD\\s+9\\b|\\bTD-9\\b', 'Terraces': '\\bTerrace', 'TheCutrer':
'\\bThe Cutrer\\b|nnay Cutrer\\b', 'Tok':
'\\bTo[\\s\\-]?Kolan|\\bTo[\\s\\-]?Kalon', 'Turn4': '\\bTurn\\s+4\\b',
'Vernas': '\\bVerna', 'Vine': '\\bVines\\b', 'Yount':
'\\bYountville\\b', 'ZThree': '\\bZ.*\\bThree\\b', 'ZCuvee':
'\\bZ.*\\bCuvee\\b|\\bCuvee Z\\b', 'Agustina': '\\bAugustina\\b',
'Durell': '\\bDurrell\\b', 'Benchland': '\\bBenchlands\\b', 'Pritchard':
'\\bPitchard\\b'}
reShipsAs = re.compile('\\(ships?\\s', re.IGNORECASE)
defaultorderlist = [['Tok'], ['Oak'], ['Res'], ['RR'], ['Landslide'], [
'Yount'], ['RRR'], ['Son'], ['Ruth'], ['Napa'], ['Helena'], ['SRH'], [
'SLH'], ['SMV'], ['SLD'], ['Paso'], ['Alex'], ['Single'], ['Estate']]
def globalVariableCheck(debug=False):
for liquor in liquorLookup:
if liquor in noGrapeLookup:
print(
                'WARNING:liquorLookup regexes will never execute - they are in noGrapeLookup:'
, liquor)
if liquor in ignoreGrapeLookup:
print(
                'WARNING:liquorLookup regexes will never execute - they are in ignoreGrapeLookup:'
, liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
print(
                'WARNING:ignoreGrapeLookup regexes will never execute - they are in noGrapeLookup:'
, winery)
def setOptionDictMasterFldValues(optiondict, debug=False):
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld + 'Master']:
optiondict[fld + 'Master'] = optiondict[fld]
def wineLookupByName(nameLookup, lookupStr, other, msg, wineAbbrLookup=None,
debug=False):
funcname = 'wineLookupByName:' + msg + ':'
if debug:
print(funcname + 'nameLookup:', nameLookup)
if nameLookup is None:
if debug:
print(funcname + 'match: value is none - continue on')
return ''
for name in nameLookup:
if debug:
print(funcname + 'match-name:', name)
if name is None:
if debug:
print(funcname +
'name-matched: value is none - continue on:pass back blank'
)
return ''
reName = re.compile('\\b' + name + '\\b', re.IGNORECASE)
if reName.search(lookupStr):
if debug:
print(funcname + 'name-MATCHED:', name)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if wineAbbrLookup and name in wineAbbrLookup:
reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)
if debug:
print(funcname + 'Abbr-match-name:', name)
if reName.search(lookupStr):
if debug:
print(funcname + 'Abbr-name-MATCHED:', wineAbbrLookup[name]
)
for val in other:
if reName.search(val):
other.remove(val)
if debug:
print(funcname + 'name-remove-from-other:', val)
return name
if debug:
print(funcname + 'name match not found:set to blank')
return None
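# illustrative sketch (hypothetical values, not from the data files):
#   wineLookupByName(['Insignia'], 'Phelps Insignia Napa', [], 'demo')
#   returns 'Insignia'; a nameLookup of [None] returns '' (keep the winery
#   name only); no match at all returns None so the caller can try other
#   lookups.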
def findQualifier(wine, debug=False):
for val, reSearch in reQualLookup:
if reSearch.search(wine):
if debug:
print('findQualifier:matched-returning:', val)
return val
if debug:
print('findQualifier:no-match-returning:', None)
return None
def findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):
if lastWinery:
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
print('fw:checking if this is lastWinery:', lastWinery)
if lastReWinery.search(rec[fldWine]):
if debug:
print('fw:this matches the last winery')
return lastWinery, lastReWinery
elif debug:
print('fw:not last winery')
for winery, reWinery in wineryLookup:
if debug:
print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
if reWinery.search(rec[fldWine]):
if debug:
print('fw:winery match found:', winery)
return winery, reWinery
return None, None
def findLiquor(rec, winery, fldWine, debug=False):
for liquor, reLiquor in liquorLookup[winery]:
if debug:
print('fl:checking liquor:', liquor)
if reLiquor.search(rec[fldWine]):
if debug:
print('fl:liquor match found:', liquor)
return liquor, reLiquor
return None, None
def findGrapeByRegex(rec, fldWine, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fgbr:grape:', grape)
if grape is not None and reGrape.search(rec[fldWine]):
if debug:
print('fgbr:grape match found:', grape)
return grape, reGrape
return None, None
def findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):
matchLoc = rec[fldWineDescr].find(findStr)
if matchLoc > -1:
other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()
if debug:
print('fsirro:findStr matched:', findStr)
if debug:
print('fsirro:findStr other:', other)
return findStr, other
if debug:
print('fsirro:findStr did not match using:', findStr)
return None, []
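# illustrative sketch (hypothetical record, for reference only):
#   findStrInRecReturnOther({'winedescr': 'Kistler Chard Sonoma 2018'},
#                           'winedescr', 'Chard')
#   returns ('Chard', ['Sonoma', '2018']) - the words after the matched string.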
def findGrapeByStr(rec, fldWineDescr, debug=False):
for grape, reGrape in grapeLookup:
if debug:
print('fg:grape:', grape)
grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,
debug=debug)
if grape:
return grape, other
return None, []
def findVintage(rec, fldWine, debug=False):
for reVintage in vintageLookup:
m = reVintage.search(rec[fldWine])
if m:
if m.group(1):
vintage = m.group(1)
if debug:
print('fv:vintage-match:', reVintage, ':group1')
elif m.group(2):
vintage = m.group(2)
if debug:
print('fv:vintage-match:', reVintage, ':group2')
elif m.group(3):
vintage = m.group(3)
if debug:
print('fv:vintage-match:', reVintage, ':group3')
else:
vintage = m.group(4)
if debug:
print('fv:vintage-match:', reVintage, ':group4')
return vintage
return None
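# illustrative sketch (hypothetical records): the patterns in vintageLookup
# capture the last two digits of a four-digit year, or a two-digit year:
#   findVintage({'wine': 'Caymus Cabernet 2018'}, 'wine')   # returns '18'
#   findVintage({'wine': "Caymus Cabernet '18"}, 'wine')    # returns '18'
#   findVintage({'wine': 'Caymus Cabernet NV'}, 'wine')     # returns None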
def buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',
debug=False):
wgLookup = {}
lastWinery = None
lastReWinery = None
for rec in wines:
if debug:
print('bwgl:new rec:', rec[fldWineDescr])
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
winery = grape = wine = liquor = None
other = []
lastWinery, lastReWinery = winery, reWinery = findWinery(rec,
lastWinery, lastReWinery, fldWine, debug=debug)
if not winery:
if debug:
print('bwgl:did not find winery-skipping:', rec[fldWine])
continue
if winery in ignoreGrapeLookup:
wine = ''
if debug:
print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
if debug:
print('bwgl:wine check noGrapeLookup on winery:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr
], [], 'noGrapeLookup', debug=debug)
if False and wine == '':
if debug:
print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
if debug:
print('bwgl:liquor check on winery:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('bwgl:liquor found and put in wine:', wine)
if wine is None:
if debug:
print('bwgl:grape check because wine is None')
grape, other = findGrapeByStr(rec, fldWineDescr)
if debug:
print('bwgl:grape:', grape, ':other:', other)
elif debug:
print('bwgl:grape check skipped - we have a wine')
if wine is None and grape is None:
if debug:
print('bwgl:record skipped - no grape or wine defined')
continue
if grape is None:
if debug:
print('bwgl:build other from winery')
wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,
winery, debug=debug)
if 'case' in other:
other.remove('case')
if debug:
print('bwgl:remove case from other')
if other:
if debug:
print('bwgl:looking at other for quals, bottlesize and vintage'
)
if not other[-1].isdigit():
for qual, reQual in reQualLookup:
if qual == other[-1]:
if debug:
print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
if other and not other[-1].isdigit():
for size, reSize in sizeLookup:
if size == other[-1]:
if debug:
print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
if other and other[-1].isdigit():
if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery
] and other[-1] in ignoreGrapeLookup[winery]:
if debug:
print(
'bwgl:value is in ignoreLookupGrape - keeping it:',
other[-1])
else:
if debug:
print('bwgl:remove vintage from other:', other[-1])
del other[-1]
if wine and wine in other:
other.remove(wine)
if debug:
print('bwgl:remove wine from other:', wine)
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine,
':', liquor, ':', other, ':', rec[fldWineDescr], ':',
rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if grape is None and wine is not None:
grape = wine
if debug:
print('bwgl:set-grape-to-wine:', grape)
if debug:
print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
if winery not in wgLookup:
wgLookup[winery] = {grape: []}
elif grape not in wgLookup[winery]:
wgLookup[winery][grape] = []
if other and other not in wgLookup[winery][grape]:
wgLookup[winery][grape].append(other)
if debug:
print('bwgl:appending to wgLookup:other:', other)
if debug:
print('bwgl:complete-read-of-master-file:sort wgLookup')
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=
len, reverse=True)
if debug:
print('\n' * 5)
print('START WGLOOKUP DUMPED')
print('#' * 80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#' * 80)
return wgLookup
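# illustrative sketch (hypothetical master rows): two Kistler rows with
# winedescr 'Kistler Chard Son' and 'Kistler Chard KistlerVnyd' (and
# 'Kistler' in the wine fld) would build
#   wgLookup == {'Kistler': {'Chard': [['Son'], ['KistlerVnyd']]}}
# (attribute lists sorted longest-first; ties keep insertion order), which
# setWineryDescrFromWineryGrapeLookup() consumes below.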
def findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],
defaultorderlist=None, valueDescr='', debug=False):
singlematch = []
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[
fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
for valuematchset in wgLookup[winery][value]:
if debug:
print('faawl:testing valuematchset:', valuematchset, ':length:',
len(valuematchset))
allmatch = True
for valuematch in valuematchset:
reMatch1 = re.compile('\\b' + valuematch + '\\b', re.IGNORECASE)
reMatch2 = re.compile('\\s' + valuematch + '\\s', re.IGNORECASE)
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
allmatch = True and allmatch
elif valuematch in AbbrLookup:
if debug:
print('faawl:valuematch-abbr:', valuematch, ':',
wineAbbrLookup[valuematch])
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
allmatch = False and allmatch
if debug:
print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
if allmatch:
if debug:
print('faawl:value matched:', valuematchset)
if len(valuematchset) == 1:
if debug:
print('faawl:single-valuematch-set-added-to-singlematch:',
valuematchset)
singlematch.append(valuematchset)
else:
if debug:
print('faawl:multivalue-valuematch-set-found:done')
return valuematchset
if not singlematch:
if debug:
print('faawl:exit with singlematch NOT populated return blank')
return []
if debug:
print('faawl:exit with singlematch populated:', singlematch)
if len(singlematch) == 1 or not defaultorderlist:
if debug:
print('faawl:return first entry in singlematch:', singlematch[0])
return singlematch[0]
defaultorder = defaultorderlist[:]
if debug:
print('faawl:multiple single match value-singlematch:', singlematch)
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0, val)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug:
print('faawl:Change from:', valuematchset, ':to Tok for mondavi')
return ['Tok']
for val in defaultorder:
if val in singlematch:
if debug:
print('faawl:selected-singlematch-value:', val)
return val
if debug:
print('faawl:valuematchset-empty')
return []
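# illustrative sketch (hypothetical wgLookup content): with
# wgLookup['Kistler']['Chard'] == [['KistlerVnyd'], ['Son']], a fldWine of
# 'Kistler Chardonnay Sonoma 750ml' fails the KistlerVnyd regex but matches
# 'Son' through wineAbbrLookup ('\bSonoma\b'), so the single-entry set
# ['Son'] is collected and returned; a fully matching multi-entry set would
# have been returned immediately instead.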
def setWineryDescrFromWineryGrapeLookup(wgLookup, wines, fldWineDescr=
'winedescr', fldWine='wine', fldWineDescrNew='winedescrnew',
fldWineDescrMatch=False, debug=False):
if debug:
print('\n' * 10,
'START WINEDESCR SETTING HERE ---------------------------------------------'
)
for rec in wines:
        winery = grape = wine = vintage = case = size = liquor = nongrape = qual = None
winematchset = grapematchset = []
if debug:
try:
print('setWinery:fldWine:', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
if fldWineDescrNew not in rec:
rec[fldWineDescrNew] = rec[fldWineDescr]
winery, reWinery = findWinery(rec, None, None, fldWine, debug=debug)
if winery is None:
if debug:
print('setWinery:winery not found-next record:' + rec[fldWine])
continue
elif winery not in wgLookup:
if debug:
print('setWinery:winery not in wgLookup:', winery)
continue
grape, reGrape = findGrapeByRegex(rec, fldWine, debug=debug)
if debug:
print('setWinery:grape found:', grape)
if winery in ignoreGrapeLookup:
if debug:
print(
'setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:'
, winery)
wine = ''
grape = None
nongrape = True
if winery in noGrapeLookup:
if debug:
print('setWinery:noGrapeLookup wine check:', winery)
wine = wineLookupByName(noGrapeLookup[winery], rec[fldWine], [],
'noGrapeLookup', wineAbbrLookup, debug=debug)
if debug:
print('setWinery:nogrape check:wine:', wine)
if wine == '':
if debug:
print(
'setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True'
)
grape = None
wine = ''
nongrape = True
elif wine:
grape = None
if debug:
print(
'setWinery:nograpeLookup:wine found - clear grape field'
)
if wine is None and winery in liquorLookup:
if debug:
print('setWinery:liqourLookup:', winery)
liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)
if liquor is not None:
wine = liquor
if debug:
print('setWinery:liquorLookup-match:', liquor)
if not grape and not nongrape and not wine and liquor is None:
if debug:
print('setWinery:did not find grape-skipping record:', rec[
fldWineDescr])
continue
if debug:
print('setWinery:pre-vintage found values for wine/liquor:',
wine, ':grape:', grape)
vintage = findVintage(rec, fldWine, debug=debug)
if debug:
print('setWinery:vintage:', vintage)
if reCase.search(rec[fldWine]):
case = 'case'
for size, reSize in sizeLookup:
if debug:
print('setWinery:sizeLookup:', size)
if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[
fldWine]):
if debug:
print('setWinery:sizeLookup:matched:', reSize)
break
else:
size = None
if debug:
print('setWinery:sizeLookup:None-found')
qual = findQualifier(rec[fldWine], debug=debug)
if debug:
try:
print('setWinery:FinalAttributes:', winery, ':', grape, ':',
wine, ':', liquor, ':', vintage, ':', case, ':', size,
':', qual, ':', rec[fldWine])
except Exception as e:
print('debug error5-continuing:', str(e))
print('fldWine:', fldWine)
if liquor is not None:
if debug:
print(
'setWinery:liquor flag set - no additional data needs to be collected'
)
elif wine is not None:
if debug:
print(
'setWinery:wine is not None - do additional lookups:wine:',
wine)
if wine in wgLookup[winery] and wgLookup[winery][wine]:
if debug:
print('setWinery:lookup winematchset')
winematchset = findAddAttribWgLookup(rec, winery, wine,
fldWine, wineAbbrLookup, None, valueDescr='wine', debug
=debug)
else:
print('setWinery:unable to perform wgLookup on winery:',
winery, ':wine:', wine, ':rec-wine:', rec[fldWine])
if debug:
try:
print('wgLookup[winery]:', wgLookup[winery])
except Exception as e:
print('debug error3-continuing:', str(e))
print('winery:', winery)
if debug:
print('setWinery:winematchset:', winematchset)
elif grape is not None:
if debug:
print('setWinery:grape is not None - do additional lookups:',
grape)
if grape in wgLookup[winery] and wgLookup[winery][grape]:
grapematchset = findAddAttribWgLookup(rec, winery, grape,
fldWine, wineAbbrLookup, defaultorderlist, valueDescr=
'grape', debug=debug)
elif grape in wgLookup[winery]:
if debug:
print(
'setWinery:grape match: matching record set is blank - no action required'
)
else:
print('setWinery:grape NONMATCH:', rec[fldWine])
if debug:
print('setWinery:liquor:', liquor, ':wine:', wine,
':grape:', grape, ':wgLookup[winery]:', wgLookup[
winery])
if debug:
print('setWinery:grapematchset:', grapematchset)
if vintage:
newVintageLookupWine = rec[fldWine]
for matchvalue in winematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:winematchset:wine-name-removal:'
, matchvalue)
for matchvalue in grapematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(
matchvalue, '')
if debug:
print(
'setWinery:2nd-vintage:grapematchset:wine-name-removal:'
, matchvalue)
if newVintageLookupWine != rec[fldWine]:
if debug:
print('setWinery:2nd-vintage:newVintageLookupWine:',
newVintageLookupWine)
newVintage = findVintage({fldWine: newVintageLookupWine},
fldWine, debug=debug)
if debug:
print('setWinery:2nd-vintage:newVintage:', newVintage)
vintage = newVintage
wineDescr = ''
if winery.startswith('z'):
vintage = None
if debug:
print('setWinery:winery starts with z: clear vintage')
if winematchset and ' '.join(winematchset) in wine:
if debug:
print('setWinery:clearing-winematchset:', winematchset,
':is-in-wine:', wine)
winematchset = []
if grapematchset and ' '.join(grapematchset) in grape:
if not (len(grapematchset) == 1 and len(grapematchset[0]) == 1):
if debug:
print('setWinery:clearing-grapematchset:',
grapematchset, ':is-in-grape:', grape)
grapematchset = []
if grapematchset and size and size in ' '.join(grapematchset):
size = ''
if winematchset and size and size in ' '.join(winematchset):
size = ''
if debug:
print('setWinery:vallist1:', [winery, grape, wine] +
grapematchset + winematchset + [vintage, size, qual, case])
print('setWinery:vallist2:', [winery, grape, wine, *
grapematchset, *winematchset, vintage, size, qual, case])
wdList = []
for val in ([winery, grape, wine] + grapematchset + winematchset +
[vintage, size, qual, case]):
if val:
wdList.append(val)
wineDescr = ' '.join(wdList)
if False:
if debug:
print('setWinery:wdList:', wdList)
if debug:
print('setWinery:wineDescr:', wineDescr)
if debug:
try:
print(':'.join(['setWinery:wineDescrList', wineDescr, rec[
fldWineDescr], str(wineDescr == rec[fldWineDescr]), rec
[fldWine]]))
except Exception as e:
print('debug error6-continuing:', str(e))
print('fldWine:', fldWine)
rec[fldWineDescrNew] = wineDescr
if fldWineDescrMatch:
rec[fldWineDescrMatch] = rec[fldWineDescr] == rec[fldWineDescrNew]
def setDigitFld2Value(wines, fld, value, debug=False):
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
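# illustrative sketch (hypothetical records): with defaultnew set, rows whose
# new-description field is still a bare count get overwritten, e.g.
#   wines = [{'winedescrnew': '12'}, {'winedescrnew': 'Caymus Cab'}]
#   setDigitFld2Value(wines, 'winedescrnew', 'blank')
#   leaves [{'winedescrnew': 'blank'}, {'winedescrnew': 'Caymus Cab'}]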
def updateFileOptionDictCheck(optiondict, wines, header, debug=False):
if optiondict['fldWineDescr'] not in wines[0]:
if debug:
print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'
, optiondict['fldWineDescr'])
if 'cnt' in wines[0]:
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
print(
'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'
)
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
print('could not find fldWineDescr in wines[0]-aborting:',
optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
error = wines[0][optiondict['fldWineDescr']]
if False and optiondict['fldWineDescr'] == 'winedescr':
if not optiondict['fldWineDescrMatch']:
optiondict['fldWineDescrMatch'] = 'same'
print('setting value fldWineDescrMatch to: same')
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
file_path, base_filename, file_ext = kvutil.filename_split(optiondict
['csvfile_update_in'])
backupfile = kvutil.filename_proper(base_filename + optiondict[
'backupfile_ext'], file_path)
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
if optiondict['fldWineDescrNew'] == 'cnt':
optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',
'wine', 'winesrt']
elif optiondict['fldWineDescrMatch']:
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict
['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]
else:
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:
]
print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[
'csvdictkeys'])
if __name__ == '__main__':
optiondict = kvutil.kv_parse_command_line(optiondictconfig, debug=False)
ppFlag = optiondict['pprint']
setOptionDictMasterFldValues(optiondict, debug=False)
if optiondict['setup_check']:
print('Running global variable check')
globalVariableCheck(debug=optiondict['debug'])
sys.exit()
print('reading in master file:', optiondict['csvfile_master_in'])
wines, header = kvcsv.readcsv2list_with_header(optiondict[
'csvfile_master_in'], headerlc=True)
wgLookup = buildWineryGrapeLookup(wines, optiondict[
'fldWineDescrMaster'], optiondict['fldWineMaster'], debug=
optiondict['debug'])
if optiondict['csvfile_master_in'] != optiondict['csvfile_update_in']:
print('reading in update file:', optiondict['csvfile_update_in'])
wines, header = kvcsv.readcsv2list_with_header(optiondict[
'csvfile_update_in'], headerlc=True)
if not wines:
print(
            'wineset.py - no records read in - no work to be done - exiting'
)
sys.exit()
updateFileOptionDictCheck(optiondict, wines, header, debug=optiondict[
'debug'])
setWineryDescrFromWineryGrapeLookup(wgLookup, wines, optiondict[
'fldWineDescr'], optiondict['fldWine'], optiondict[
'fldWineDescrNew'], optiondict['fldWineDescrMatch'], debug=
optiondict['debug'])
if optiondict['defaultnew'] is not None:
print('Setting ', optiondict['fldWineDescrNew'], ' to ', optiondict
['defaultnew'], 'if not set')
setDigitFld2Value(wines, optiondict['fldWineDescrNew'], optiondict[
'defaultnew'], debug=optiondict['debug'])
kvcsv.writelist2csv(optiondict['csvfile_update_out'], wines, optiondict
['csvdictkeys'])
print('Saved results to:', optiondict['csvfile_update_out'])
'''
@author: Ken Venner
@contact: [email protected]
@version: 1.13
Read in a file of wine names and create consistent wine descriptions
from these names.
'''
import kvutil
import kvcsv
import re
import sys
import shutil
# may comment out in the future
import pprint
pp = pprint.PrettyPrinter(indent=4)
ppFlag = False
# application variables
optiondictconfig = {
'AppVersion' : {
'value' : '1.13',
'description' : 'defines the version number for the app',
},
'debug' : {
'value' : False,
'type' : 'bool',
'description' : 'defines if we are running in debug mode',
},
'verbose' : {
'value' : 1,
'type' : 'int',
'description' : 'defines the display level for print messages',
},
'setup_check' : {
'value' : False,
'type' : 'bool',
'description' : 'defines if we checking out setup',
},
'pprint' : {
'value' : False,
'type' : 'bool',
'description' : 'defines if we output with pretty print when debugging',
},
'csvfile_master_in' : {
'value' : 'wine_xref.csv',
'description' : 'defines the name of the master data input file',
},
'csvfile_update_in' : {
'value' : 'wineref.csv',
'description' : 'defines the name of the input file to updated',
},
'csvfile_update_out' : {
'value' : 'wineref2.csv',
'description' : 'defines the name of the updated output file',
},
'fldWine' : {
'value' : 'wine',
'description' : 'defines the name of the field that holds the Wine ',
},
'fldWineDescr' : {
'value' : 'winedescr',
'description' : 'defines the name of the field holding the wine description',
},
'fldWineDescrNew' : {
'value' : 'winedescrnew',
'description' : 'defines the name of the NEW field holding the new description ',
},
'fldWineDescrMatch' : {
'value' : None,
'description' : 'defines the name of the NEW field holding the results of comparison existing to new description ',
},
'fldWineMaster' : {
'value' : None,
'description' : 'defines the name of the field that holds the Wine when reading the master file ',
},
'fldWineDescrMaster' : {
'value' : None,
'description' : 'defines the name of the field holding the wine description when reading the master file',
},
'backupfile_ext' : {
'value' : '.bak',
'description' : 'defines the extension to use to copy the update input file to if we are replacing it with output',
},
'defaultnew' : {
'value' : None,
'description' : 'defines if we should take field fldWineDescrNew and set to a value if not set',
},
}
### GLOBAL VARIABLES / LOOKUPS ########################################
# regex search for vintage in wine name
vintageLookup = (
    re.compile(r'\d\d\d\d\s+\d\d(\d\d)'),           # two years together - handle this one early
    re.compile(r'^\d\d(\d\d)'),                     # four position start of line
    re.compile(r'\s\d\d(\d\d)$'),                   # four position end of line
    re.compile(r'\s\d\d(\d\d)\s'),                  # four position middle of line
    re.compile(r'XX\d\d(\d\d)\s'),                  # four position after an XX marker
    re.compile(r'\s\d\d(\d\d)\/'),                  # four position before a slash (split vintage)
    re.compile(r"\s'?(\d\d)'?$|\s'?(\d\d)'?\s"),    # two position date with optional apostrophe front or back
)
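# a minimal sketch of how these patterns are applied (hypothetical name):
#   for reVintage in vintageLookup:
#       m = reVintage.search('Silver Oak Cab 2016 750ml')
#       if m: break
#   # the first three patterns fail; the fourth matches ' 2016 ', capturing '16'
# findVintage() walks the tuple the same way and returns the captured group.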
# regex search for case in wine name
reCase = re.compile(r'12\s*X\s*750\s*ML|\bcase\b|12\/750\s*ML',re.IGNORECASE)
# regex to pick up qualifiers from the wine
reQualLookup = (
    (None, re.compile(r'\bWithout\s+Gift\b|\bNo\s+Gift', re.IGNORECASE)), # the no-gift forms - match them first
('Gift', re.compile(r'\bGift\b', re.IGNORECASE)),
('VAP', re.compile(r'\bVAP\b', re.IGNORECASE)),
('VAP', re.compile(r'\bGlassVAP\b', re.IGNORECASE)),
('Glass', re.compile(r'\bGlass\b', re.IGNORECASE)),
('Glass', re.compile(r'\bGlasses\b', re.IGNORECASE)),
('Etch', re.compile(r'\bEtch\b', re.IGNORECASE)),
('Basket', re.compile(r'\bBasket\b', re.IGNORECASE)),
)
# regex search to define the size of the wine bottle
sizeLookup = (
('1.75L', re.compile(r'\b1\.75\s*Li?|\b1\.75$', re.IGNORECASE)),
('1.5L', re.compile(r'\b1\.5\s*L?\b|\bMagnum\b', re.IGNORECASE)),
('375mL', re.compile(r'Half\s+Bottle|375ml', re.IGNORECASE)),
('200mL', re.compile(r'\b200\s*ML|\(200\s*ML', re.IGNORECASE)),
('50mL', re.compile(r'\b50\s*ML|\(50\s*ML', re.IGNORECASE)),
('500mL', re.compile(r'\b500\s*ML|\(500\s*ML', re.IGNORECASE)),
('3L', re.compile(r'\b3\s*Li?', re.IGNORECASE)),
('6L', re.compile(r'\b6\s*Li?', re.IGNORECASE)),
('9L', re.compile(r'\b9\s*Li?', re.IGNORECASE)),
    ('1L', re.compile(r'\b1L\b|\b1\s+L$|\b1\.0\s*L\b|\b1\s+Liter\b|\bOne\s+Liter\b|\bLITER\b|\b1\s*LTR', re.IGNORECASE)),
)
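# a minimal sketch (hypothetical name): the first matching size wins -
#   next((size for size, reSize in sizeLookup
#         if reSize.search('Opus One 1.5L Magnum')), None)
# yields '1.5L'; names with no size keyword fall through to None.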
# regex extract winery names from the wine field
wineryLookup = (
('Alban', re.compile(r'\bAlban\b', re.IGNORECASE)),
('Arrowood', re.compile(r'\bArrowood\b', re.IGNORECASE)),
('Atalon', re.compile(r'\bAtalon\b', re.IGNORECASE)),
('Attune', re.compile(r'\bAttune\b', re.IGNORECASE)),
('Auteur', re.compile(r'\bAuteur\b', re.IGNORECASE)),
('Austin Hope', re.compile(r'\bAustin\s+Hope\b', re.IGNORECASE)),
('Badge', re.compile(r'\bBadge\b', re.IGNORECASE)),
('Balletto', re.compile(r'\bBalletto\b', re.IGNORECASE)),
('Bell', re.compile(r'\bBell\s+Cellar', re.IGNORECASE)),
('BR Cohn', re.compile(r'\bB\.?\s?R\.?\s+Cohn\b', re.IGNORECASE)),
('Bremer', re.compile(r'\bBremer\b', re.IGNORECASE)),
('Brewer-Clifton', re.compile(r'\bBrewer[\s\-]Clifton\b', re.IGNORECASE)),
('BV', re.compile(r'\bBeaulieu\s+V|\bBV\b', re.IGNORECASE)),
('Belle Glos', re.compile(r'\bBelle\s+Glos\b', re.IGNORECASE)),
('Bennett Ln', re.compile(r'\bBennet+\sLane\b', re.IGNORECASE)),
('Benovia', re.compile(r'\bBenovia\b', re.IGNORECASE)),
('Beringer', re.compile(r'\bBeringer\b', re.IGNORECASE)),
('Blackstone', re.compile(r'\bBlackstone\b', re.IGNORECASE)),
('Brancott', re.compile(r'\bBrancott\b', re.IGNORECASE)),
('Cade', re.compile(r'\bCade\b', re.IGNORECASE)),
('Cain Five', re.compile(r'\bCain\s+Five\b|\bCain\s-\sFive\b|\bCain\s5\b|\bCainFive\b', re.IGNORECASE)),
('Cakebread', re.compile(r'\bCakebread\b', re.IGNORECASE)),
('Cardinale', re.compile(r'\bCardinale\b', re.IGNORECASE)),
('Caymus', re.compile(r'\bCaymus\b', re.IGNORECASE)),
('Chappellet', re.compile(r'\bChappellet\b', re.IGNORECASE)),
('Chalk Hill', re.compile(r'\bChalk\s+Hill\b', re.IGNORECASE)),
('Clos Du Bois', re.compile(r'\bClos\s+Du\s+Bois\b', re.IGNORECASE)),
('ClosDuVal', re.compile(r'\bClos\s+du\s+Val\b', re.IGNORECASE)),
('Colgin', re.compile(r'\bColgin\b', re.IGNORECASE)),
('Concha Don Melchor', re.compile(r'\bConcha\s.*Don\s+Melchor\b|Don\s+Melchor\b', re.IGNORECASE)),
('Continuum', re.compile(r'\bContinuum\b', re.IGNORECASE)),
('Corison', re.compile(r'\bCorison\b', re.IGNORECASE)),
('Cristal', re.compile(r'Roederer\s?.*Cristal\b|\bCristal\b.+Brut', re.IGNORECASE)),
('Curran', re.compile(r'\bCurran\b', re.IGNORECASE)),
('Darioush', re.compile(r'\bDarioush\b', re.IGNORECASE)),
('Darioush', re.compile(r'\bCaravan\b', re.IGNORECASE)),
('David Arthur', re.compile(r'\bDavid\s+Arthur\b', re.IGNORECASE)),
('David Bruce', re.compile(r'\bDavid\s+Bruce\b', re.IGNORECASE)),
('Davis Family', re.compile(r'\bDavis\s+Family\b', re.IGNORECASE)),
('Del Dotto', re.compile(r'\bDel\s+Dotto\b', re.IGNORECASE)),
('Dominus', re.compile(r'\bDominus\b', re.IGNORECASE)),
('Goldeneye', re.compile(r'\bGoldeneye\b', re.IGNORECASE)), # before duckhorn
('Paraduxx', re.compile(r'\bParaduxx\b', re.IGNORECASE)), # before duckhorn
('Domaine Carneros', re.compile(r'\bDomaine\s+Carneros\b', re.IGNORECASE)),
('Drappier', re.compile(r'\bDrappier\b', re.IGNORECASE)),
('Duckhorn', re.compile(r'\bDuckhorn\b', re.IGNORECASE)),
('Dumol', re.compile(r'\bDumol\b', re.IGNORECASE)),
('Dunn', re.compile(r'\bDunn\b', re.IGNORECASE)),
('Ehlers', re.compile(r'\bEhlers\b', re.IGNORECASE)),
('Etude', re.compile(r'\bEtude\b', re.IGNORECASE)),
('Far Niente', re.compile(r'\bFar Niente\b', re.IGNORECASE)),
('Flora', re.compile(r'\bFlora\s+Springs\b', re.IGNORECASE)),
('Flowers', re.compile(r'\bFlowers\b', re.IGNORECASE)),
('Robert Foley', re.compile(r'\bRobert\s+\bFoley\b', re.IGNORECASE)), #before Foley
('Foley', re.compile(r'\bFoley\b', re.IGNORECASE)),
('Foxen', re.compile(r'\bFoxen\b', re.IGNORECASE)),
('Franciscan', re.compile(r'\bFranciscan\b', re.IGNORECASE)),
('Frank Family', re.compile(r'\bFrank Family\b', re.IGNORECASE)),
('Gary Farrell', re.compile(r'\bGary\s+Farrel+\b', re.IGNORECASE)),
('Ghost Block', re.compile(r'\bGhost\s+Block\b', re.IGNORECASE)),
('Grgich', re.compile(r'\bGrgich\b', re.IGNORECASE)),
('Groth', re.compile(r'\bGroth\b', re.IGNORECASE)),
('Gundlach', re.compile(r'\bGundlach\b', re.IGNORECASE)),
('Hansel', re.compile(r'\bHansel\b', re.IGNORECASE)),
('Hanzell', re.compile(r'\bHanzell\b', re.IGNORECASE)),
('Hess', re.compile(r'\bHess\b', re.IGNORECASE)),
('Hewitt', re.compile(r'\bHewitt\b', re.IGNORECASE)),
('Hobbs', re.compile(r'\bHobbs\b|\bcrossbarn\b', re.IGNORECASE)),
('Hundred Acre', re.compile(r'\bHundred\s+Acre\b', re.IGNORECASE)),
('Jordan', re.compile(r'\bJordan\b', re.IGNORECASE)),
('Justin', re.compile(r'\bJustin\b', re.IGNORECASE)),
('Kim Crawford', re.compile(r'\bKim\s+Crawford\b', re.IGNORECASE)),
('Kistler', re.compile(r'\bKistler\b', re.IGNORECASE)),
('Kosta', re.compile(r'\bKosta\s+Browne?\b', re.IGNORECASE)),
('Krug', re.compile(r'\bKrug\b', re.IGNORECASE)),
('Kunde', re.compile(r'\bKunde\b', re.IGNORECASE)),
('LaCrema', re.compile(r'\bLa\s?Crema\b', re.IGNORECASE)),
('Lewis', re.compile(r'\bLewis\b', re.IGNORECASE)),
('Lokoya', re.compile(r'\bLokoya\b', re.IGNORECASE)),
('Meiomi', re.compile(r'\bMeiomi\b', re.IGNORECASE)),
('Melville', re.compile(r'\bMelville\b', re.IGNORECASE)),
('Momento Mori', re.compile(r'\bMomento\s+Mori\b', re.IGNORECASE)),
('Mondavi', re.compile(r'\bMondavi\b', re.IGNORECASE)),
('Montelena', re.compile(r'\bMontelena\b', re.IGNORECASE)),
('Mt Veeder', re.compile(r'^Mount\s+Veeder\b|^Mt\.? Veeder\b|\d+\s+M[^t]*t\s+Veeder\b', re.IGNORECASE)),
('Newton', re.compile(r'\bNewton\b', re.IGNORECASE)),
('Nickel', re.compile(r'\bNickel\b', re.IGNORECASE)),
('Opus One', re.compile(r'\bOpus\s+One\b', re.IGNORECASE)),
('P Togni', re.compile(r'\bTogni\b', re.IGNORECASE)),
('Pahlmeyer Jayson', re.compile(r'\bJayson\b', re.IGNORECASE)), # this before pahlmeyer
('Pahlmeyer', re.compile(r'\bPahlmeyer\b(?!\s*Jay)', re.IGNORECASE)),
('Papillon', re.compile(r'\bPapillon\b', re.IGNORECASE)),
('Patz', re.compile(r'\bPatz\b', re.IGNORECASE)),
('Phelps', re.compile(r'\bPhelps\b', re.IGNORECASE)),
('Plumpjack', re.compile(r'\bPlumpjack\b', re.IGNORECASE)),
('Pride', re.compile(r'\bPride\b', re.IGNORECASE)),
('Prisoner', re.compile(r'\bPrisoner\b', re.IGNORECASE)),
('Provenance', re.compile(r'\bProvenance\b', re.IGNORECASE)),
('R Sinskey', re.compile(r'\bSinskey\b', re.IGNORECASE)),
('Ramey', re.compile(r'\bRamey\b', re.IGNORECASE)),
('Revana', re.compile(r'\bRevana\b', re.IGNORECASE)),
('Raptor', re.compile(r'\bRaptor\s+Ridge\b', re.IGNORECASE)),
('Ridge', re.compile(r'\bRidge\b', re.IGNORECASE)),
('Robert Foley', re.compile(r'\bRobert\s+Foley\b', re.IGNORECASE)),
('Rombauer', re.compile(r'\bRombauer\b', re.IGNORECASE)),
('Rudd', re.compile(r'\bRudd\b', re.IGNORECASE)),
('Scarecrow', re.compile(r'\bScarecrow\b', re.IGNORECASE)),
('Sea Smoke', re.compile(r'\bSea\s+Smoke\b', re.IGNORECASE)),
('Seghesio', re.compile(r'\bSeghesio\b', re.IGNORECASE)),
('Shafer', re.compile(r'\bShafer\b', re.IGNORECASE)),
('Sherwin', re.compile(r'\bSherwin\b', re.IGNORECASE)),
('Silver Oak', re.compile(r'\bSilver\s+Oak\b', re.IGNORECASE)),
('Silverado', re.compile(r'\bSilverado\b', re.IGNORECASE)),
('Simi', re.compile(r'\bSimi\b', re.IGNORECASE)),
('Sonoma Cutrer', re.compile(r'\bCutrer\b', re.IGNORECASE)),
('Spottswoode', re.compile(r'\bSpottswoode\b', re.IGNORECASE)),
('Stag Leap', re.compile(r'\bStag.*\sLeap\b', re.IGNORECASE)),
('Sullivan', re.compile(r'\bSullivan\b', re.IGNORECASE)),
('Summerland', re.compile(r'\bSummerland\b', re.IGNORECASE)),
('Summers', re.compile(r'\bSummers\b', re.IGNORECASE)),
('Tantara', re.compile(r'\bTantara\b', re.IGNORECASE)),
('Turnbull', re.compile(r'\bTurnbull\b', re.IGNORECASE)),
('Veuve', re.compile(r'\bVeuve\b', re.IGNORECASE)),
('Viader', re.compile(r'\bViader\b', re.IGNORECASE)),
('Waterstone', re.compile(r'\bWaterstone\b', re.IGNORECASE)),
('Whitehall', re.compile(r'\bWhitehall\b', re.IGNORECASE)),
('Wm Selyem', re.compile(r'\bWilliams\s*\-?Selyem\b', re.IGNORECASE)),
('ZD', re.compile(r'\bZD\b', re.IGNORECASE)),
('Zaca', re.compile(r'\bZaca\b', re.IGNORECASE)),
('zBourbon Woodford Res', re.compile(r'\bWoodford\s+Reserve\b', re.IGNORECASE)),
('zBourbon Woodford Res', re.compile(r'\bWoodford\s+Rsv\b', re.IGNORECASE)),
('zCognac Courvoisier', re.compile(r'\bCourvoisier\b', re.IGNORECASE)),
('zCognac Hennessy', re.compile(r'\bHennesse?y\b', re.IGNORECASE)),
('zCognac Remy', re.compile(r'\bRemy\s+Martin\b|\bRemy\s+Louis', re.IGNORECASE)),
('zCointreau', re.compile(r'\bCointreau\b', re.IGNORECASE)),
('zGin Hendrick', re.compile(r'\bHendrick', re.IGNORECASE)),
('zGin Tanqueray', re.compile(r'\bTanqueray\b', re.IGNORECASE)),
('zRum Mt Gay', re.compile(r'\bMount\s+Gay\b|\bMt\s+Gay', re.IGNORECASE)),
('zRum Ron Zacapa', re.compile(r'\bRon\s+Zacapa\b', re.IGNORECASE)),
('zRye Hayden', re.compile(r'\bBasil\s+Hayden\b', re.IGNORECASE)),
('zSambuca', re.compile(r'\bSambuca\b', re.IGNORECASE)),
('zScotch Glenmorangie', re.compile(r'\bGlenmorangie\b', re.IGNORECASE)),
('zScotch Hibiki Harmony', re.compile(r'\bHibiki\s.*Harmony\b', re.IGNORECASE)),
('zScotch Hibiki', re.compile(r'\bHibiki\b(?!\s*Har)', re.IGNORECASE)),
('zScotch Macallan', re.compile(r'\bMacallan\b', re.IGNORECASE)),
('zTeq Campo Azul', re.compile(r'\bCampo\s+Azul\b', re.IGNORECASE)),
('zTeq Casamigos', re.compile(r'\bCasamigos\b', re.IGNORECASE)),
('zTeq Casino Azul', re.compile(r'\bCasino\s+Azul\b', re.IGNORECASE)),
('zTeq Clase Azul', re.compile(r'\bClase\s+Azul\b', re.IGNORECASE)),
('zTeq Cuervo', re.compile(r'\bJose\s+Cuervo\b|^Cuervo\b', re.IGNORECASE)),
('zTeq Don Julio', re.compile(r'\bDon\s+Julio\b', re.IGNORECASE)),
    ('zTeq Dos Artes', re.compile(r'\bDos\s+Artes\b', re.IGNORECASE)),
('zTeq Gran Cava', re.compile(r'\bGran\s+Cava\b', re.IGNORECASE)),
('zTeq Herradura', re.compile(r'\bHerradura\b', re.IGNORECASE)),
('zTeq Loma Azul', re.compile(r'\bLoma\s+Azul\b', re.IGNORECASE)),
('zTeq Padre Azul', re.compile(r'\bPadre\s+Azul\b', re.IGNORECASE)),
('zTeq Partida', re.compile(r'\bPartida\b', re.IGNORECASE)),
('zTeq Patron', re.compile(r'\bPatron\b', re.IGNORECASE)),
('zTripleSec Gr Marnier', re.compile(r'\bGrand\s+Marnier\b', re.IGNORECASE)),
('zTripleSec Dekuyper', re.compile(r'\bDekuyper\b', re.IGNORECASE)),
('zTripleSec Hiram', re.compile(r'\bHiram\b', re.IGNORECASE)),
('zVodka Absolut', re.compile(r'\bAbsolut\b', re.IGNORECASE)),
('zVodka Skyy', re.compile(r'\bSkyy\b', re.IGNORECASE)),
('zVodka Tito', re.compile(r'\bTito', re.IGNORECASE)),
('zWhiskey Balvenie', re.compile(r'\bBalvenie\b', re.IGNORECASE)),
('zWhiskey J Walker', re.compile(r'\bJohn+ie\s+Walker\b', re.IGNORECASE)),
# ('', re.compile(r'\b\b', re.IGNORECASE)),
)
# regex extract the grape from the wine fld
grapeLookup = (
('Cab Franc', re.compile(r'\bCabernet\s+Franc|\bCab\s+Franc', re.IGNORECASE)), # before cab
('Cab', re.compile(r'\bCabernet\b|\sCS\s|\sCS$|\bCab\b', re.IGNORECASE)),
('Claret', re.compile(r'\bClaret\b', re.IGNORECASE)),
('Rose Pinot', re.compile(r'\bRose\b.*\bPinot\b|\bPinot\b.*\bRose\b', re.IGNORECASE)),
('Pinot', re.compile(r'\bPinot\b|\bPN\b|\bP\s+Noir\b', re.IGNORECASE)),
('Merlot', re.compile(r'\bMerlot\b|\bME\b', re.IGNORECASE)),
('Sauv Blanc', re.compile(r'\bSauvignon\s+Blanc\b|\bSB\b', re.IGNORECASE)),
('Sauv Blanc', re.compile(r'\bSauvignon\/Fume\s+Blanc\b', re.IGNORECASE)),
('Meritage', re.compile(r'\bMeritage\b', re.IGNORECASE)),
('Fume', re.compile(r'\bFume\b|\bFumé', re.IGNORECASE)),
('Champagne', re.compile(r'\bChampagne\b', re.IGNORECASE)),
('Chard', re.compile(r'\bChar+d|\bCH\b', re.IGNORECASE)),
('Shiraz', re.compile(r'\bShiraz\b', re.IGNORECASE)),
('Syrah', re.compile(r'\bSyrah\b|\bSY\b',re.IGNORECASE)),
('Zin', re.compile(r'\bZinfandel\b|\bZIN\b|\bZN\b', re.IGNORECASE)),
('Rose', re.compile(r'\bRose\b|\bRosé', re.IGNORECASE)),
    ('Sangiovese', re.compile(r'\bSangiovese\b', re.IGNORECASE)),
# ('Brandy', re.compile(r'\bBrandy\b', re.IGNORECASE)),
('Gewurzt', re.compile(r'\bGew.rztraminer\b|\bGewürzt', re.IGNORECASE)),
('Malbec', re.compile(r'\bMalbec\b', re.IGNORECASE)),
('Viognier', re.compile(r'\bViognier\b', re.IGNORECASE)),
('Roussanne', re.compile(r'\bRoussanne\b', re.IGNORECASE)),
('Charbono', re.compile(r'\bCharbono\b', re.IGNORECASE)),
('PSirah', re.compile(r'\bPetite Sirah\b', re.IGNORECASE)),
('Cuvee', re.compile(r'\bCuvee\b', re.IGNORECASE)),
('Red', re.compile(r'\bRed\b|\bBordeaux\s+Blend\b', re.IGNORECASE)),
('Syrah-Cab', re.compile(r'\bSyrcab\b|\bsyrah[-\s\/]+cab', re.IGNORECASE)),
('Grenache', re.compile(r'\bGrenache\b', re.IGNORECASE)),
('Tempranillo', re.compile(r'\bTempranillo\b', re.IGNORECASE)),
)
# wineries that we don't want to look up the grape on
ignoreGrapeLookup = {
'Cristal' : ['Rose', None],
'Domaine Carneros' : ['Brut', None],
'Dominus' : [None],
'Papillon' : None,
'Paraduxx' : None,
'Veuve' : None,
'zCointreau' : None,
'zGin Hendrick' : None,
'zGin Tanqueray' : ['Ten', None],
'zTripleSec Gr Marnier' : ['1880', '100th', 'Cent', 'Quin', None],
'zTripleSec Dekuyper' : None,
'zTripleSec Hiram' : None,
'zVodka Skyy' : ['Citrus', None],
'zVodka Tito' : None,
# 'Prisoner' : ['Cuttings', 'Red', 'Derange', 'Saldo', 'Blindfold', None],
}
# winery to wine-name lookup used when no grape is found in the wine name
#
# for each winery, list the known wine names; when a row has no grape match,
# the first name found in the description is the name used
noGrapeLookup = {
    'Ehlers' : ['120-80'], # matches an abbreviation - and matches fldWineDescr
'Alban' : ['Pandora'],
'BV' : ['Tapestry', 'Latour'],
'Bennett Ln' : ['Maximus'],
'Bremer' : ['Austintatious'],
'Cain Five' : None,
'Colgin' : ['Cariad', 'IX'],
'Concha Don Melchor' : None,
'Continuum' : None,
'Darioush' : ['Duel', 'Darius'],
'Duckhorn' : ['Discussion'],
'Far Niente' : ['Dolce'],
'Flora' : ['Trilogy'],
'Franciscan' : ['Magnificat'],
'Grgich' : ['Violetta'],
'Gundlach' : ['Vintage Reserve'],
'Justin' : ['Isosceles'],
'Krug' : ['Generations'],
'Mondavi' : ['Maestro'],
'Newton' : ['Puzzle'],
'Opus One' : None,
'Phelps' : ['Insignia'],
'Prisoner' : ['Cuttings', 'Derange', 'Saldo', 'Blindfold'],
'Ridge' : ['Monte Bello'],
'Robert Foley' : ['Griffin'],
'Sullivan' : ['Coeur de Vigne'],
'Zaca' : ['ZThree', 'ZCuvee'],
'zCognac Courvoisier' : ['Napolean', 'VS', 'VSOP', 'XO'],
'zCognac Hennessy' : ['Paradis', 'Richard', 'VS', 'VSOP', 'XO', 'Master'],
'zCognac Remy' : ['1738', 'Louis XIII', 'VSOP', 'XO', 'VS'],
'zRum Ron Zacapa' : ['23', 'Negra', 'XO'],
'zRye Hayden' : ['Dark', 'Caribbean'],
'zScotch Hibiki Harmony' : None,
# 'zScotch Hibiki' : ['Toki', '12', '17', '21', '30'],
'zTeq Campo Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
'zTeq Casamigos' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
'zTeq Casino Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Silver'],
'zTeq Clase Azul' : ['Ultra', 'Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Mezcal', 'Plata', 'Platino'],
'zTeq Dos Artes' : ['Extra Anejo'],
'zTeq Gran Cava' : ['Extra Anejo'],
'zTeq Loma Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
# 'zTeq Padre Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],
'zTeq Partida' : ['Blanco', 'Elegante'],
'zVodka Absolut' : ['Citron', 'Mandarin', 'Mandrin', 'Mango', 'Ruby', 'Vanilia', 'Raspberri', 'Grapevine', None],
'zWhiskey J Walker' : ['Double Black', 'Black', 'Blue', 'Gold', 'Green', 'Platinum', 'Red','Swing', 'White', '18', '21'],
}
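# a minimal sketch of how this table is consumed (hypothetical description):
#   wineLookupByName(noGrapeLookup['Phelps'], 'Phelps Insignia 750ml', [], 'demo')
# returns 'Insignia'; for a winery mapped to None (e.g. 'Opus One') it
# returns '' - keep the winery name only.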
# regex to use to determine if this is a liquor, not a wine
#
# winery -> [ (liquor, regex), ... ]
# if there is no grape and no noGrapeLookup match, but the winery has a liquorLookup,
# use this list of lookups to find the additional information to add to the winery
#
liquorLookup = {
'zRum Mt Gay' : [
('1703 Mst', re.compile(r'\b1703\b', re.IGNORECASE)),
('BB', re.compile(r'\bBlack Barrel\b', re.IGNORECASE)),
('Eclipse Silver', re.compile(r'\bEclipse\s+Silver\b', re.IGNORECASE)),
('Eclipse', re.compile(r'\bEclipse\b', re.IGNORECASE)),
('Old Peat', re.compile(r'\bOld Peat', re.IGNORECASE)),
('Old Pot', re.compile(r'\bPot\s+Still\b', re.IGNORECASE)),
('Old', re.compile(r'\bOld\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
('XO Peat', re.compile(r'\bXO\b', re.IGNORECASE)),
],
'zScotch Glenmorangie' : [
('10', re.compile(r'\b10(YR)?\b', re.IGNORECASE)),
('14 Port', re.compile(r'14.+\bQuinta\b|14.+\bPort\b|\bQuinta\b.+14|\bPort\b.+14', re.IGNORECASE)),
('12 Bacalta', re.compile(r'\bBacalta\b', re.IGNORECASE)),
('12 Burgundy', re.compile(r'\bBurgundy\b', re.IGNORECASE)),
('12 Nectar', re.compile(r'\bNectar\b', re.IGNORECASE)),
('12 Port', re.compile(r'\bQuinta\b|\bPort\b', re.IGNORECASE)),
('12 Sherry', re.compile(r'\bLa\s?Santa\b|\bSherry\b', re.IGNORECASE)),
('12 Signet', re.compile(r'\bSignet\b', re.IGNORECASE)),
('15 Cadboll', re.compile(r'\bCadboll', re.IGNORECASE)),
('15', re.compile(r'\b15(YR)?\b', re.IGNORECASE)),
('18', re.compile(r'\b18(YR)?\b|\b18YEAR\b', re.IGNORECASE)),
('25 Astar', re.compile(r'\bAstar\b', re.IGNORECASE)),
('25', re.compile(r'\b25(YR)?\b', re.IGNORECASE)),
('Companta', re.compile(r'\bCompanta\b', re.IGNORECASE)),
('Finealta', re.compile(r'\bFinealta\b', re.IGNORECASE)),
('Milsean', re.compile(r'\bMilsean\b', re.IGNORECASE)),
('Sonnalta', re.compile(r'\bSonnalta\b', re.IGNORECASE)),
],
'zScotch Macallan' : [
('10 Fine', re.compile(r'\bFine.*\b10\b|\b10.*Fine')),
('10', re.compile(r'\b10\b')),
('12 Double Gold', re.compile(r'\bDbl\b.*Gold|\bDouble\b.*Gold', re.IGNORECASE)),
('12 Double', re.compile(r'\bDouble\s.*12(YR)?\b', re.IGNORECASE)),
('12 Double', re.compile(r'\b12\s.*Double\b', re.IGNORECASE)),
('12 Double', re.compile(r'\bDbl\b|\bDouble\b', re.IGNORECASE)),
('12 Edition 1', re.compile(r'\bEdition\s.*1\b', re.IGNORECASE)),
('12 Edition 2', re.compile(r'\bEdition\s.*2\b', re.IGNORECASE)),
('12 Edition 3', re.compile(r'\bEdition\s.*3\b', re.IGNORECASE)),
('12 Edition 4', re.compile(r'\bEdition\s.*4\b', re.IGNORECASE)),
('12 Sherry', re.compile(r'\b12\s.*Sherry\b|\bSherry\b\s.*\b12', re.IGNORECASE)),
('12 Triple', re.compile(r'\b12(YR)?\s.*Triple\b', re.IGNORECASE)),
('12 Triple', re.compile(r'\bTriple\s.*12\b', re.IGNORECASE)),
('12', re.compile(r'\b12(YR)?\b', re.IGNORECASE)),
('15 Triple', re.compile(r'\b15(YR)?\s.*Triple\b|Triple.+\b15(YR)?\b', re.IGNORECASE)),
('15 Fine', re.compile(r'\b15(YR)?\b.*\bFine\b', re.IGNORECASE)),
('15', re.compile(r'\b15(YR)?\b', re.IGNORECASE)),
('17 Sherry', re.compile(r'\b17(YR)?\s.*Sherry\b', re.IGNORECASE)),
('17 Fine', re.compile(r'\b17(YR)?\b.*\bFine\b', re.IGNORECASE)),
('17', re.compile(r'\b17(YR)?\b', re.IGNORECASE)),
('18 Sherry', re.compile(r'\b18(YR)?\s.*Sherry\b|Sherry\b.*18', re.IGNORECASE)),
('18 Triple', re.compile(r'\b18(YR)?\s.*Triple\b|Triple.+\b18(YR)?\b', re.IGNORECASE)),
('18 Fine', re.compile(r'\b18(YR)?\b.*\bFine\b', re.IGNORECASE)),
('18 Gran', re.compile(r'Gran\b.*\b18', re.IGNORECASE)),
('18', re.compile(r'\b18(YR)?\b', re.IGNORECASE)),
('21 Fine', re.compile(r'\b21.*Fine\b', re.IGNORECASE)),
('21', re.compile(r'\b21(YR)?\b', re.IGNORECASE)),
('25 Sherry', re.compile(r'\b25\s.*Sherry\b', re.IGNORECASE)),
('25', re.compile(r'\b25(YR)?\b')),
('30 Sherry', re.compile(r'\b30\s.*Sherry', re.IGNORECASE)),
('30 Triple', re.compile(r'\b30(YR)?\s.*Triple\b|Triple.+\b30(YR)?\b', re.IGNORECASE)),
('30 Fine', re.compile(r'\b30(YR)?\b.*\bFine\b|Fine.*30', re.IGNORECASE)),
('30', re.compile(r'\b30(YR)?\b')),
('Rare', re.compile(r'\bRare\b', re.IGNORECASE)),
],
'zTeq Cuervo' : [
('Especial Gold', re.compile(r'\bEspecial\b.*Gold\b|Gold.*Especial', re.IGNORECASE)),
('Especial Blue', re.compile(r'\bEspecial\b.*Blue\b', re.IGNORECASE)),
('Especial', re.compile(r'\bEspecial\b', re.IGNORECASE)),
('Familia Platino', re.compile(r'\bPlatino\b', re.IGNORECASE)),
('Familia Anejo', re.compile(r'\bFamilia\b|\bReserva\b', re.IGNORECASE)),
('Gold', re.compile(r'\bGold\b', re.IGNORECASE)),
('Reposado Lagavulin', re.compile(r'\bReposado.*Lagavulin', re.IGNORECASE)),
('Tradicional Anejo', re.compile(r'Tradicional.*Anejo|Anejo.*Tradicional', re.IGNORECASE)),
('Tradicional Reposado', re.compile(r'Tradicional.*Reposado|Reposado.*Tradicional', re.IGNORECASE)),
('Tradicional Silver', re.compile(r'\bTradicional\b', re.IGNORECASE)),
('Tradicional Silver', re.compile(r'\bTraditional\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
],
'zTeq Don Julio' : [
('1942', re.compile(r'\b1942\b', re.IGNORECASE)),
('Real', re.compile(r'\bReal\b', re.IGNORECASE)),
('Anejo Claro 70th', re.compile(r'\b70th\b', re.IGNORECASE)),
('Anejo Claro', re.compile(r'\bAnejo\b\s*Claro\b', re.IGNORECASE)),
('Anejo', re.compile(r'\bAnejo\b', re.IGNORECASE)),
('Blanco', re.compile(r'\bBlanco\b', re.IGNORECASE)),
('Reposado Lagavulin', re.compile(r'\bRepo.+Lagvulin\b', re.IGNORECASE)),
('Reposado Dbl', re.compile(r'\bReposado.+Double\b', re.IGNORECASE)),
('Reposado Dbl', re.compile(r'\bReposado.+Dbl\b', re.IGNORECASE)),
('Reposado Dbl', re.compile(r'\bDouble.+Reposado\b', re.IGNORECASE)),
('Reposado Private', re.compile(r'\bReposado.+Private\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
],
'zTeq Herradura' : [
('Ultra', re.compile(r'\bUltra\b', re.IGNORECASE)),
('Suprema', re.compile(r'\bSuprema\b', re.IGNORECASE)),
('Anejo', re.compile(r'\bAnejo\b', re.IGNORECASE)),
('Blanco', re.compile(r'\bBlanco\b', re.IGNORECASE)),
('Reposado Gold', re.compile(r'\bReposado\s+Gold\b|\bGold\s+Reposado\b', re.IGNORECASE)),
('Reposado Scotch', re.compile(r'\bReposado.+Scotch\b|\bScotch.+Reposado\b', re.IGNORECASE)),
('Reposado Port', re.compile(r'\bPort.+Reposado\b|\bReposado.+Port\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
],
'zTeq Patron' : [
('Gran Piedra', re.compile(r'\bPiedra\b', re.IGNORECASE)),
('DELETE Roca DELETE', re.compile(r'\bRoca\b', re.IGNORECASE)),
('Anejo Extra Lalique', re.compile(r'\bLalique\b', re.IGNORECASE)),
('Anejo Extra 7yr', re.compile(r'\b7YR\b|\b7 anos\b|\b7 year\b', re.IGNORECASE)),
('Anejo Extra 5yr', re.compile(r'\b5YR\b|\b5 anos\b|\b5 year\b', re.IGNORECASE)),
('Anejo Extra 10yr', re.compile(r'\b10\b.+\bExtra\b|\bExtra\b.+10', re.IGNORECASE)),
('Anejo Extra', re.compile(r'\bExtra\s+Anejo\b', re.IGNORECASE)),
('Gran Anejo', re.compile(r'\bGran\s+Anejo\b', re.IGNORECASE)),
('Gran Anejo', re.compile(r'\bBurdeos\b', re.IGNORECASE)),
('Gran Smoky', re.compile(r'\bGran\s+.*Smoky\b', re.IGNORECASE)),
('Anejo', re.compile(r'\bAnejo\b', re.IGNORECASE)),
('Gran Platinum', re.compile(r'\bPlatinum\b', re.IGNORECASE)),
('Reposado', re.compile(r'\bReposado\b', re.IGNORECASE)),
('Silver LTD', re.compile(r'\bSilver.*Limited\b|\bLimited.*Silver\b', re.IGNORECASE)),
('Silver Estate', re.compile(r'\bEstate.*Silver\b|\bSilver.*Estate\b', re.IGNORECASE)),
('Silver', re.compile(r'\bSilver\b', re.IGNORECASE)),
('Blanco', re.compile(r'\bBlanco\b', re.IGNORECASE)),
# ('', re.compile(r'\b\b', re.IGNORECASE)),
],
'zTeq Padre Azul' : [
('Blanco', re.compile(r'\bsilver\b', re.IGNORECASE)),
],
'zWhiskey Balvenie' : [
('12 Double', re.compile(r'\bDouble.*12(YR)?\b', re.IGNORECASE)),
('12 Double', re.compile(r'\b12(YR)?\s.*Double', re.IGNORECASE)),
('12 First', re.compile(r'\b12(YR)?\s.*First', re.IGNORECASE)),
('12 USA', re.compile(r'\b12.*American|American.*12', re.IGNORECASE)),
('12 Toast', re.compile(r'\b12(YR)?\s.*Toast', re.IGNORECASE)),
('12', re.compile(r'\b12(YR)?\b', re.IGNORECASE)),
('14 Carib', re.compile(r'\b14(YR)?\s.*Carib', re.IGNORECASE)),
('14 Carib', re.compile(r'\b14(YR)?\s.*CB\s+Cask', re.IGNORECASE)),
('14 Carib', re.compile(r'\bCarr?ib', re.IGNORECASE)),
('14 Peat', re.compile(r'\b14(YR)?\s.*Peat', re.IGNORECASE)),
('15 Sherry', re.compile(r'\b15(YR)?\s.*Sherry\b', re.IGNORECASE)),
('15 Sherry', re.compile(r'\bSherry\s+.*15(YR)?\b', re.IGNORECASE)),
('15', re.compile(r'\b15(YR)?\b', re.IGNORECASE)),
('16 Triple', re.compile(r'\b16(YR)?\s.*Triple\b', re.IGNORECASE)),
('17 Sherry Double', re.compile(r'\b17(YR)?\s.*Sherry\s+Doub', re.IGNORECASE)),
('17 Sherry', re.compile(r'\b17(YR)?\s.*Sherry', re.IGNORECASE)),
('17 Double', re.compile(r'\b17(YR)?\s.*Double', re.IGNORECASE)),
('17 Double', re.compile(r'\bDouble.*17(YR)?\b', re.IGNORECASE)),
# 17 Double Sherry
# 17 Islay
# 17 New Oak
('17 Peat', re.compile(r'\b17(YR)?\s.*Peat', re.IGNORECASE)),
('17 Peat', re.compile(r'\bPeat.*17(YR)?\b', re.IGNORECASE)),
('17', re.compile(r'\b17(YR)?\b', re.IGNORECASE)),
('21 Port', re.compile(r'\b21.*Port', re.IGNORECASE)),
('21 Port', re.compile(r'\bPort.*21\b', re.IGNORECASE)),
('21', re.compile(r'21', re.IGNORECASE)),
('25', re.compile(r'\b25(YR)?\b', re.IGNORECASE)),
('30', re.compile(r'\b30(YR)?\b', re.IGNORECASE)),
('40', re.compile(r'\b40(YR)?\b', re.IGNORECASE)),
],
'zBourbon Woodford Res' : [
('Dbl', re.compile(r'\bDouble\b', re.IGNORECASE)),
('Derby', re.compile(r'\bDerby\b', re.IGNORECASE)),
('Rye Choc', re.compile(r'\bChocolate.*Rye\b', re.IGNORECASE)),
('Rye', re.compile(r'\bRye\b', re.IGNORECASE)),
('Brandy', re.compile(r'\bBrandy\b', re.IGNORECASE)),
('Batch', re.compile(r'\bBatch\b', re.IGNORECASE)),
('Barrel', re.compile(r'\bBarrel\b', re.IGNORECASE)),
('Master', re.compile(r'\bMasters?\b', re.IGNORECASE)),
('Malt', re.compile(r'\bMalt\b', re.IGNORECASE)),
('Maple', re.compile(r'\bMaple\b', re.IGNORECASE)),
('Wheat', re.compile(r'\bWheat\b', re.IGNORECASE)),
('', re.compile(r'\bWoodford\b', re.IGNORECASE)),
],
'zSambuca' : [
('Romana Black', re.compile(r'\bRomana.*\bBlack\b|\bBlack\s+Romana\b', re.IGNORECASE)),
('Romana', re.compile(r'\bRomana\b', re.IGNORECASE)),
('Di Amore', re.compile(r'\bdi Amore\b', re.IGNORECASE)),
],
'zScotch Hibiki' : [
('12', re.compile(r'\b12\s*YE?A?R\b', re.IGNORECASE)),
('17 Limited', re.compile(r'\b17\s*YE?A?R\b.+Limited', re.IGNORECASE)),
('17', re.compile(r'\b17\s*YE?A?R\b', re.IGNORECASE)),
('21 Limited', re.compile(r'\b21\s*YE?A?R\b.+Limited', re.IGNORECASE)),
('21', re.compile(r'\b21\s*YE?A?R\b', re.IGNORECASE)),
('30', re.compile(r'\b30\s*YE?A?R\b', re.IGNORECASE)),
]
}
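# a minimal sketch of how this table is consumed (hypothetical name):
#   findLiquor({'wine': 'Don Julio Anejo 750ml'}, 'zTeq Don Julio', 'wine')
# walks liquorLookup['zTeq Don Julio'] in order and returns
# ('Anejo', <its regex>); entry order matters - 'Anejo Claro' is tested
# before the plain 'Anejo' pattern so the more specific label wins.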
# regex to expand abbreviated master-file values out to their full form when matching against the wine fld
wineAbbrLookup = {
'120-80' : r'\bOne\s+Twenty\s+Over\s+Eighty\b',
'3Amigos' : r'\bThree\s+Amigos\b',
'3Palms' : r'\bThree\s+Palms\b',
'3Sister' : r'\bThree\s+Sisters?\b',
'4Barrell' : r'\b4[\-\s]Barrels?\b',
'Alex' : r'\bAlexander\b',
'And' : r'\bAnderson\b',
'Car' : r'\bCarneros\b',
'Carries' : r'\bCarrie',
'CC' : r'\bC\.?C\.?\s+Ranch\b',
'Clone4' : r'\bClone\s+4\b',
'Clone6' : r'\bClone\s+6\b',
'Crossbarn' : r'\bCross\s+Barn\b',
'Donna' : r'\bDonna',
'Est' : r'\bEstate\b',
'Estate' : r'\bEst\b',
'Gap' : r'\bGap|\s%27Gap',
'Gary' : r'\bGary',
'Julia' : r'\bJulia',
'Knights' : r'\bKnight',
'KistlerVnyd' : r'\bKistler (Vineyard|VYD|EST)\b',
'LP' : r'\bLes Pierres\b',
'Lyn' : r'\bLyndenhur?st\b',
'Mont' : r'\bMonterey\b',
'Mt' : r'\bMount\b|\bMt\.\b',
'Napa/Son' : r'\bNapa.*Son',
'Oak' : r'\bOakville\b',
'One-Pt-5' : r'\bOne\s+Point\s+Five\b',
'Pomm' : r'\bPommeraie\b',
'Priv' : r'\bPrivate\b',
'RR' : r'\bRussian\s+Rivers?\b|RRV',
'RRR' : r'\bRussian\s+Rivers?\b|RRV',
'Res' : r'\bReserve\b|\bRsv\b|\bResrv\b|\bReserv\b|\bReserve$',
'Rose' : r'\bRosé|\bROS&EACUTE;|\bRos%E9',
'Ruth' : r'\bRutherford\b',
'Sandy' : r'\bSandy',
'Samanthas' : r'\bSamantha',
'SC' : r'\bSanta\s+Cruz\b',
'SLD' : r'\bStag.*Leap\b',
'SLH' : r'\bSanta\s+Lucia\b',
'SMV' : r'\bSanta\s+Maria|\bS\s+Maria',
'SRH' : r'\bSTA\.?|\bSANTA\s+Rita\b|\bSTA\sRITA\sHILLS|\bS\s+RITA\b',
    'SS' : r'\bSpecial\s+Selection\b',
'Stage' : r'\bStagecoach\b',
'Son' : r'\bSonoma\b',
'SYV' : r'\bSanta\s+Ynez\s+Valley\b',
'TD9' : r'\bTD\s+9\b|\bTD-9\b',
'Terraces' : r'\bTerrace',
'TheCutrer' : r'\bThe Cutrer\b|nnay Cutrer\b',
'Tok' : r'\bTo[\s\-]?Kolan|\bTo[\s\-]?Kalon',
'Turn4' : r'\bTurn\s+4\b',
'Vernas' : r'\bVerna',
'Vine' : r'\bVines\b',
'Yount' : r'\bYountville\b',
'ZThree' : r'\bZ.*\bThree\b',
'ZCuvee' : r'\bZ.*\bCuvee\b|\bCuvee Z\b',
# misspellings
'Agustina' : r'\bAugustina\b',
'Durell' : r'\bDurrell\b',
'Benchland' : r'\bBenchlands\b',
'Pritchard' : r'\bPitchard\b',
}
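# a minimal sketch (hypothetical strings): the master file may abbreviate an
# attribute that the wine feed spells out, e.g. a master value of 'RR' is
# matched against 'Kosta Browne Russian River PN' via
#   re.search(wineAbbrLookup['RR'], 'Kosta Browne Russian River PN', re.IGNORECASE)
# which hits the expanded form '\bRussian\s+Rivers?\b|RRV'.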
# regex search - detect a "(ships as ...)" packaging note
reShipsAs = re.compile(r'\(ships?\s', re.IGNORECASE)
# the precedence order used when multiple single-value attribute matches are found
defaultorderlist=[['Tok'], ['Oak'], ['Res'], ['RR'], ['Landslide'], ['Yount'], ['RRR'], ['Son'], ['Ruth'], ['Napa'], ['Helena'], ['SRH'], ['SLH'], ['SMV'], ['SLD'], ['Paso'], ['Alex'], ['Single'], ['Estate']]
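# a minimal sketch: if a wine matches both ['Res'] and ['Oak'] as single-value
# sets, findAddAttribWgLookup() returns ['Oak'] because ['Oak'] precedes
# ['Res'] in this list; matched sets not present here are tried first.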
### FUNCTIONS ############################################
#########################################################################################
def globalVariableCheck( debug=False ):
# check for liquor definitions that are in noGrapeLookup
# these will never execute
for liquor in liquorLookup:
if liquor in noGrapeLookup:
            print('WARNING:liquorLookup regexes will never execute - they are in noGrapeLookup:', liquor)
if liquor in ignoreGrapeLookup:
            print('WARNING:liquorLookup regexes will never execute - they are in ignoreGrapeLookup:', liquor)
for winery in ignoreGrapeLookup:
if winery in noGrapeLookup:
            print('WARNING:ignoreGrapeLookup regexes will never execute - they are in noGrapeLookup:', winery)
#########################################################################################
def setOptionDictMasterFldValues( optiondict, debug=False ):
# default these fields to the fld values if they are not set
# otherwise leave them alone
for fld in ('fldWine', 'fldWineDescr'):
if not optiondict[fld+'Master']:
optiondict[fld+'Master'] = optiondict[fld]
#########################################################################################
# having a list of names to look at and match on - see if this record has a match
# nameLookup - list of names; may have None as the last value, or be just None
# lookupStr - string to be searched
# other - array of strings from which the matching name will be removed
# msg - string defining who called this function
#
# returns: string - if a matching string is found
# None - did not find a match
# '' - valid match with "None"
#
def wineLookupByName( nameLookup, lookupStr, other, msg, wineAbbrLookup=None, debug=False ):
# string for debugging messages
funcname = 'wineLookupByName:' + msg + ':'
# debugging
if debug: print(funcname + 'nameLookup:', nameLookup)
    # if the value for this winery is None - then there is no additional work - we are done
if nameLookup is None:
# no additional processing
# debugging
if debug: print(funcname + 'match: value is none - continue on')
# return empty string
return ''
# there are additional lookups for this winery - not using grape as part of the description
# check each of the things to look up
for name in nameLookup:
# debugging
if debug: print(funcname + 'match-name:', name)
# special processing of a lookup value of none
if name is None:
# Lookup on none - means just use what we found
# debugging
if debug: print(funcname + 'name-matched: value is none - continue on:pass back blank')
# stop iterating on nameLookup - by returning empty string
return ''
# we have not encountered 'None' - so build the regex based on the text provided
reName = re.compile( r'\b'+name+r'\b', re.IGNORECASE)
# check to see if we have a match with this regex
if reName.search(lookupStr):
# we have a match - so this is the additional attribute we are looking for
# debugging
if debug: print(funcname+'name-MATCHED:', name)
# remove from other if it is in there
for val in other:
if reName.search(val):
other.remove(val)
# debugging
if debug: print(funcname + 'name-remove-from-other:', val)
# stop iterating on nameLookup - return what we found
return name
# 2nd check see if have a translation and this name is translatable
if wineAbbrLookup and name in wineAbbrLookup:
# build the regex with the look up value
reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)
# debugging
if debug: print(funcname + 'Abbr-match-name:', name)
            # check to see if we have a match with this regex
if reName.search(lookupStr):
# we have a match - so this is the additional attribute we are looking for
# debugging
if debug: print(funcname+'Abbr-name-MATCHED:', wineAbbrLookup[name])
# remove from other if it is in there
for val in other:
if reName.search(val):
other.remove(val)
# debugging
if debug: print(funcname + 'name-remove-from-other:', val)
# stop iterating on nameLookup - return what we found
return name
    # checked all the nameLookup entries - and did not find any matches
    # debugging
if debug: print(funcname + 'name match not found:set to blank')
# return none meaning we did not find a match
return None
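# usage sketch (illustrative values only - not entries from the real lookup tables):
#   other = ['Red', 'Blend', '2018']
#   wineLookupByName(['Blend'], 'Winery Red Blend 2018', other, 'demo')
#   # -> returns 'Blend' and strips it from other (other is now ['Red', '2018'])
#   wineLookupByName(None, 'anything', [], 'demo')   # -> '' (nothing to look up)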
#########################################################################################
# find the qualifier (like gift, etch, glass) tied to this string
#
# Global Variable Used: reQualLookup (list of (value, regex) tuples)
#
# returns: first qualifier or None
#
def findQualifier( wine, debug=False ):
for (val, reSearch) in reQualLookup:
if reSearch.search(wine):
if debug: print('findQualifier:matched-returning:', val)
return val
if debug: print('findQualifier:no-match-returning:', None)
return None
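# usage sketch (assumes reQualLookup holds an entry like ('gift', re.compile(r'\bgift\b', re.IGNORECASE))):
#   findQualifier('Caymus Cab Gift Set')   # -> 'gift'
#   findQualifier('Caymus Cab 750ml')      # -> None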
#########################################################################################
# find the winery tied to the rec
#
# Global Variable Used: wineryLookup (an array of regex that define the winery)
#
# returns: (winery, reWinery)
#
def findWinery( rec, lastWinery, lastReWinery, fldWine, debug=False ):
# if we had a prior winery - test for this match first
if lastWinery:
# debugging
if debug:
try:
print('fw:new winery:', rec[fldWine])
except Exception as e:
print('debug error8-continuing:', str(e))
print('rec[fldWine]:type:', type(rec[fldWine]))
# print('fw:new winery:', rec[fldWine].decode('windows-1252'))
print('fw:checking if this is lastWinery:', lastWinery)
# check to see if the winery is a match again for this record
if lastReWinery.search(rec[fldWine]):
# debugging
if debug: print('fw:this matches the last winery')
# match again - return values
return(lastWinery, lastReWinery)
else:
# not match - debugging
if debug: print('fw:not last winery')
# if we did not match lastWinery - lets look through the list
# go through the list of wineries (global variable),
# each row contains wineryName, wineryRegex
# pulling out the tuple from the lookup
for (winery, reWinery) in wineryLookup:
# debugging
if debug: print('fw:not lastWinery-checking winery:', winery)
if fldWine not in rec:
print('not a column in this record fldWine:', fldWine)
print('rec:', rec)
# check to see if this winery is a match
if reWinery.search(rec[fldWine]):
# debugging
if debug: print('fw:winery match found:', winery)
# this is a match - set the variables
return (winery, reWinery)
# for loop ends without a match
# did not find a matching winery in the for loop - clear values
return (None, None)
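# usage sketch (assumes wineryLookup holds ('Caymus', re.compile(r'\bCaymus\b', re.IGNORECASE))):
#   rec = {'wine': '2018 Caymus Napa Cabernet'}
#   winery, reW = findWinery(rec, None, None, 'wine')   # -> ('Caymus', <compiled regex>)
#   # pass (winery, reW) back in as (lastWinery, lastReWinery) on the next call so
#   # consecutive records from the same winery skip the full table scan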
#########################################################################################
# find the liquor tied to the rec, leveraging the winery
# Global Variable Used: liquorLookup
#
# returns: (liquor, reLiquor)
#
def findLiquor( rec, winery, fldWine, debug=False ):
# go through the list of liquors (global variable), pulling out the tuple from the lookup
for (liquor, reLiquor) in liquorLookup[winery]:
# debugging
if debug: print('fl:checking liquor:', liquor)
# check to see if this liquor is a match
if reLiquor.search(rec[fldWine]):
# debugging
if debug: print('fl:liquor match found:', liquor)
# this is a match - set the variables
return (liquor, reLiquor)
# for loop ends without a match
# did not find a matching liquor in the for loop - clear values
return (None, None)
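# usage sketch (hypothetical liquorLookup entry - the winery must already be a key):
#   given liquorLookup == {'Macallan': [('12-Year', re.compile(r'\b12\b'))]},
#   findLiquor({'wine': 'Macallan 12 Single Malt'}, 'Macallan', 'wine')
#   # -> ('12-Year', <compiled regex>); (None, None) when nothing matches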
#########################################################################################
# find the grape tied to the rec by regex evaluation
#
# Global Variable Used: grapeLookup
#
# returns: (grape, reGrape)
#
def findGrapeByRegex( rec, fldWine, debug=False ):
    # go through the list of grapes (global variable), pulling out the tuple from the lookup
for (grape, reGrape) in grapeLookup:
# debugging
if debug: print('fgbr:grape:', grape)
        # check to see if this grape is a match
if grape is not None and reGrape.search(rec[fldWine]):
# debugging
if debug: print('fgbr:grape match found:', grape)
# this is a match - set the variables
return (grape, reGrape)
# for loop ends without a match
# did not find a matching grape in the for loop - clear values
return (None, None)
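# usage sketch (assumes grapeLookup holds ('Cab', re.compile(r'\bCabernet\b', re.IGNORECASE))):
#   findGrapeByRegex({'wine': '2019 Cabernet Sauvignon'}, 'wine')   # -> ('Cab', <compiled regex>)
#   # entries whose grape is None are skipped, so they never match here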
#########################################################################################
# find a string in a field of a record using string match and
# on match, return that it matched and the remainder of the string as an array
#
# returns: (findStr, other)
#
def findStrInRecReturnOther( rec, fldWineDescr, findStr, debug=False ):
# find where in the string this findStr is positioned
matchLoc = rec[fldWineDescr].find(findStr)
# if we found a location
if matchLoc > -1:
        # then keep everything to the right of the findStr value and split it into the other attributes
other = rec[fldWineDescr][matchLoc+len(findStr)+1:].split()
# debugging
if debug: print('fsirro:findStr matched:', findStr)
if debug: print('fsirro:findStr other:', other)
# return what we found
return (findStr, other)
#no match found - debugging
if debug: print('fsirro:findStr did not match using:', findStr)
# did not find a matching findStr - return that fact
return (None, [])
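# usage sketch (illustrative record):
#   rec = {'winedescr': 'Caymus Cab Napa 2018'}
#   findStrInRecReturnOther(rec, 'winedescr', 'Cab')      # -> ('Cab', ['Napa', '2018'])
#   findStrInRecReturnOther(rec, 'winedescr', 'Merlot')   # -> (None, [])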
#########################################################################################
# find the grape tied to the rec and the list of other attributes
# to the right of the grape in that description
#
# Global Variable Used: grapeLookup
#
# returns: (grape, other)
#
def findGrapeByStr( rec, fldWineDescr, debug=False ):
# find the grape and strip everything right of that from the fldWineDescr field
    for (grape,reGrape) in grapeLookup:
        # skip a catch-all None grape entry - str.find() cannot take None
        if grape is None:
            continue
        # debugging
        if debug: print('fg:grape:', grape)
# find where in the string this grape is positioned
(grape, other) = findStrInRecReturnOther( rec, fldWineDescr, grape, debug=debug)
# if we have a match return that match
if grape:
return (grape, other)
# did not find a matching grape - return that fact
return (None, [])
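# usage sketch: same record as above, but the grape string comes from grapeLookup
# (assumes grapeLookup holds a ('Cab', <regex>) entry):
#   findGrapeByStr({'winedescr': 'Caymus Cab Napa 2018'}, 'winedescr')
#   # -> ('Cab', ['Napa', '2018'])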
#########################################################################################
# find the vintage tied to the rec
#
# Global Variable Used: vintageLookup
#
# returns: vintage
#
def findVintage( rec, fldWine, debug=False ):
# loop through the vintage lookup records
for reVintage in vintageLookup:
# search for match
m = reVintage.search(rec[fldWine])
# if there is a match
if m:
            # extract the value from the first regex group with a value
if m.group(1):
vintage = m.group(1)
if debug: print('fv:vintage-match:', reVintage,':group1')
elif m.group(2):
vintage = m.group(2)
if debug: print('fv:vintage-match:', reVintage,':group2')
elif m.group(3):
vintage = m.group(3)
if debug: print('fv:vintage-match:', reVintage,':group3')
else:
vintage = m.group(4)
if debug: print('fv:vintage-match:', reVintage,':group4')
            # return what we found
return vintage
# did not find it
return None
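# usage sketch (assumes vintageLookup has a pattern that captures a four-digit year):
#   findVintage({'wine': 'Caymus Cab 2018 750ml'}, 'wine')   # -> '2018'
#   findVintage({'wine': 'Caymus Cab NV'}, 'wine')           # -> None (no year matched)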
#########################################################################################
# Create the winery/grape-wine-liquor conversion table based on the
# array of records passed in
#
# this routine takes the already read in list of definitions and parses them up
# in order to create a winery-wine-attributes file - that will be used
# later to take new records from searching the internet and properly assign
# an aligned/consistent wine description to that wine string
#
# we expect the wines array to have attributes: fldWineDescr (winedescr), and fldWine (wine_name)
#
# returns: wgLookup - dictionary - which is built from parsing winedescr NOT wine_name
#
# wgLookup[winery][grape] = list of lists of attributes to perform lookups with
#
def buildWineryGrapeLookup( wines, fldWineDescr='winedescr', fldWine='wine', debug=False ):
# local variables
wgLookup = {}
lastWinery = None
lastReWinery = None
# step through the records read in
for rec in wines:
# debugging
if debug: print('bwgl:new rec:', rec[fldWineDescr])
# set the variable
if not fldWineDescr in rec:
print('creating-field:', fldWineDescr)
rec[fldWineDescr] = ''
# local loop variables
winery = grape = wine = liquor = None
other = []
### WINERY
(lastWinery, lastReWinery) = (winery, reWinery) = findWinery( rec, lastWinery, lastReWinery, fldWine, debug=debug )
        # if we did not find the winery - skip this record
if not winery:
# debugging
if debug: print('bwgl:did not find winery-skipping:', rec[fldWine])
# don't process this record - get the next record to process
continue
### IGNOREGRAPE and NOGRAPE and LIQUOR
# if this winery has a noGrapeLookup option - use that to split up the record
if winery in ignoreGrapeLookup:
### BLANK WINE
# don't get the grape for this winery
# set wine to blank
wine = ''
# debugging
if debug: print('bwgl:wine check ignoreGrapeLookup on winery:', winery)
elif winery in noGrapeLookup:
### NO GRAPE WINE -- fldWineDescr
# debugging
if debug: print('bwgl:wine check noGrapeLookup on winery:', winery)
# find which wine is a match from the noGrapeLookup
wine = wineLookupByName( noGrapeLookup[winery], rec[fldWineDescr], [], 'noGrapeLookup', debug=debug )
            # no match came back - we keep the wine as blank (the reset below is intentionally disabled)
if False and wine == '':
# debugging
if debug: print('bwgl:nograpelookup:no-match:set wine to None')
wine = None
elif winery in liquorLookup:
### LIQUOR ---- fldWine
# debugging
if debug: print('bwgl:liquor check on winery:', winery)
# see if a liquor matches
(liquor, reLiquor) = findLiquor( rec, winery, fldWine, debug=debug )
# if we found match - populate wine so we don't look for grape
if liquor is not None:
wine = liquor
# debugging
if debug: print('bwgl:liquor found and put in wine:', wine)
### GRAPE (if we have not filled in wine) --- fldWineDescr
if wine is None:
# debugging
if debug: print('bwgl:grape check because wine is None')
            # determine if there is a grape in this string
            # if there is, 'other' captures the words to its right
(grape,other) = findGrapeByStr( rec, fldWineDescr )
# debugging
if debug: print('bwgl:grape:', grape, ':other:', other)
else:
# debugging
if debug: print('bwgl:grape check skipped - we have a wine')
### Skip this record if we don't have a wine or a grape
if wine is None and grape is None:
# debugging
if debug: print('bwgl:record skipped - no grape or wine defined')
continue
### OTHER (if not already created by grape lookup) ---- fldWineDescr
#
# if we did not find the grape in the string
# so other was not populated
# we need to look up other using 'winery' as the filter
if grape is None:
# debugging
if debug: print('bwgl:build other from winery')
# find where in the string this grape is positioned
(wineryFind, other) = findStrInRecReturnOther( rec, fldWineDescr, winery, debug=debug)
### OTHER Additional Processing
# remove CASE - the keyword case if it exists
if 'case' in other:
other.remove('case')
# debugging
if debug: print('bwgl:remove case from other')
# remove VINTAGE and/or BOTTLESIZE and/or other QUALIFIERS
# the last element will either be the vintage (no bottle size)
# or will be the bottle size and then next is the vintage
# if the last position is not vintage, attempt to remove the bottle size
# then remove vintage - this should be the vintage (validated by isdigit lookup)
if other:
if debug: print('bwgl:looking at other for quals, bottlesize and vintage')
# remove qualifiers if exist
if not other[-1].isdigit():
# first we check to see if there is a qualifier appended
                # the last value is not a vintage - see if it is a qualifier
for qual,reQual in reQualLookup:
if qual == other[-1]:
if debug: print('bwgl:remove qualifier from other:', qual)
del other[-1]
break
# remove bottle size if exist
if other and not other[-1].isdigit():
                # the last value is not a vintage - see if it is a bottle size
for size,reSize in sizeLookup:
if size == other[-1]:
if debug: print('bwgl:remove bottlesize from other:', size)
del other[-1]
break
# remove vintage if it is there
if other and other[-1].isdigit():
# first check to see if this is part of the ignore grape solution
                if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery] and other[-1] in ignoreGrapeLookup[winery]:
if debug: print('bwgl:value is in ignoreLookupGrape - keeping it:', other[-1])
else:
# debugging
if debug: print('bwgl:remove vintage from other:', other[-1])
del other[-1]
# remove WINE - the element if the element is the same as the wine
if wine and wine in other:
other.remove(wine)
# debugging
if debug: print('bwgl:remove wine from other:', wine)
# debugging
if debug:
try:
print('bwgl:Final-Build:', winery, ':', grape, ':', wine, ':', liquor, ':', other, ':', rec[fldWineDescr], ':', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
### BUILD LOOKUP FOR CONVERSION (we use the grape attribute to build the dictionary)
        # move the wine value into grape because we did not find the grape
if grape is None and wine is not None:
grape = wine
# debugging
if debug: print('bwgl:set-grape-to-wine:', grape)
        ### WINERY:GRAPE-WINE-LIQUOR Dictionary creation
# debugging
if debug: print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)
# validate we have an entry for this winery in the lookup dict
if winery not in wgLookup:
            # one does not exist - so create a stub for winery:grape
wgLookup[winery] = { grape : [] }
else:
# one DOES exist - check to see if the grape is already here
if grape not in wgLookup[winery]:
# grape is not here - so create an empty list to stuff values into
wgLookup[winery][grape] = []
# check to see if we have OTHER attributes
# and if we do - check to see that this list of attributes
# is not already in the wineLookup array
# and if this list does not exist - then append this list
if other and other not in wgLookup[winery][grape]:
# add this list of other to this entry
wgLookup[winery][grape].append(other)
# debugging
if debug: print('bwgl:appending to wgLookup:other:', other)
# end loop on wines
### SORTED WINERY:GRAPE lookup - most optional attributes first in the list
    # debugging
if debug: print('bwgl:complete-read-of-master-file:sort wgLookup')
# now sort the list of lookups from most specific (greatest number of attributes) to least
for winery in wgLookup:
for grape in wgLookup[winery]:
wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=len, reverse=True)
# debugging
if debug:
print('\n'*5)
print('START WGLOOKUP DUMPED')
print('#'*80)
if ppFlag:
pp.pprint(wgLookup)
else:
print('bwgl:final-wgLookup:\n', wgLookup)
print('#'*80)
# done with for loop - return the lookup
return wgLookup
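# shape sketch of the returned lookup (values illustrative, not from a real run):
#   wgLookup == {'Caymus': {'Cab': [['Special', 'Select'], ['Napa']],
#                           'Chard': []}}
#   # per winery/grape: lists of leftover attribute lists, sorted most-specific
#   # (longest) first so multi-attribute sets win in the later matching pass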
#########################################################################################
# find the matching set of additional attributes that match this record
# from the global lookup.
#
# we assume that we have already tested that winery and value exist in wgLookup prior to calling this routine
#
# the special parameters here are:
# value - this is either "wine" or "grape" - this routine allows you to lookup on different attributes
# valueDescr - passed in string for debugging telling us which value was passed in
#
# defaultorderlist = array of arrays of strings - gives the default order of single-match lookups to determine which of
# many matches is the one we will select
#
# Global Variable Used: wgLookup
#
# returns: valuematchset array selected
#
def findAddAttribWgLookup( rec, winery, value, fldWine, AbbrLookup=[], defaultorderlist=None, valueDescr='', debug=False ):
# local variable - capture all the entries that are single match entries
singlematch=[]
# debugging
if debug:
try:
print('faawl:value:', valueDescr, ':match-wgLookup:', rec[fldWine], ':', wgLookup[winery][value])
except Exception as e:
print('debug error7-continuing:', str(e))
print('fldWine:', fldWine)
# for each set of values that could be a match
for valuematchset in wgLookup[winery][value]:
# debugging
if debug: print('faawl:testing valuematchset:', valuematchset, ':length:', len(valuematchset))
# set the flag to start
allmatch = True
# loop through the set of values that make up this set
for valuematch in valuematchset:
# for each entry - build a regex and test it and add it up
# we need all values in this valueset to be true for this valueset to be match
reMatch1 = re.compile(r'\b'+valuematch+r'\b', re.IGNORECASE)
reMatch2 = re.compile(r'\s'+valuematch+r'\s', re.IGNORECASE)
# check to see if this regex is a match
m1 = reMatch1.search(rec[fldWine])
m2 = reMatch2.search(rec[fldWine])
if m1 or m2:
# this regex is a match
allmatch = True and allmatch
elif valuematch in AbbrLookup:
# this regex was not a match - but we want to check if the value also has
# a translation - and if it has a translation - then we test the translation also
# the value did not work but there is an alternate value to check
# debugging
if debug: print('faawl:valuematch-abbr:', valuematch, ':', wineAbbrLookup[valuematch])
# create the regex
reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)
# test the regex and attach the results to allmatch
allmatch = reMatch.search(rec[fldWine]) and allmatch
else:
# not a match - update allmatch
allmatch = False and allmatch
# debugging
if debug: print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)
# check to see if all matched
if allmatch:
# all matched - so this is a match - so break out of the valuematchset group
# debugging
if debug: print('faawl:value matched:', valuematchset)
            # different action based on # of items being matched
if len(valuematchset) == 1:
# debugging
if debug: print('faawl:single-valuematch-set-added-to-singlematch:', valuematchset)
# single value matching - we don't stop when we find a match
singlematch.append(valuematchset)
else:
# debugging
if debug: print('faawl:multivalue-valuematch-set-found:done')
# multi value match so we are done when we find a match - so return
return valuematchset
# did not find matchset in the for loop - check to see if we have singlematch
if not singlematch:
# debugging
if debug: print('faawl:exit with singlematch NOT populated return blank')
# did not have singlematch found - we are done - return empty
return []
# singlematch populated
# debugging
if debug: print('faawl:exit with singlematch populated:', singlematch)
# check to see how many matches we got
if len(singlematch) == 1 or not defaultorderlist:
# debugging
if debug: print('faawl:return first entry in singlematch:', singlematch[0])
# if there is only one entry in here
# or we don't have a default order so we pick the first found
# and we set the value to this
return singlematch[0]
# we need to define which of the singlematch values we will return
# the defaultorderlist will be used to set that ordering
#
# create a local copy of the list that can be changed in this routine
defaultorder = defaultorderlist[:]
# multiple singlematch values so lets find and pick the best one
# debugging
if debug: print('faawl:multiple single match value-singlematch:', singlematch)
# get the values from singlematch that are not in defaultorder
# and put them at the start of defaultorder list
# go in reverse order when doing this lookup
for val in singlematch[::-1]:
if val not in defaultorder:
defaultorder.insert(0,val)
### HARDCODED ###
    # very short term fix - we need to prioritize these single tags (mondavi problem)
if winery == 'Mondavi' and ['Tok'] in singlematch:
if debug: print('faawl:Change from:', valuematchset, ':to Tok for mondavi')
return ['Tok']
# find the first matching value from priority order list
for val in defaultorder:
if val in singlematch:
# debugging
if debug: print('faawl:selected-singlematch-value:', val)
# we found the first match - set it and break out
return val
# debugging
if debug: print('faawl:valuematchset-empty')
# did not match - return empty
return []
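# usage sketch (illustrative wgLookup contents, mirroring the shape sketch above):
#   given wgLookup['Caymus']['Cab'] == [['Special', 'Select'], ['Napa']]:
#   a rec whose fldWine holds both 'Special' and 'Select' returns the multi-value
#   set ['Special', 'Select'] at once; a rec matching only 'Napa' is collected as
#   a single match and resolved through the defaultorderlist ordering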
#########################################################################################
# create a consistent wine name for a list or records with store based wine descriptions
#
# the special parameters here are:
# wgLookup - dictionary of winery, wine, list of wines
# wines - list of records to be processed
#
# Global Variable Used: ignoreGrapeLookup, noGrapeLookup, wineAbbrLookup, liquorLookup
# reCase, sizeLookup
#
# returns: [updated values in the wines array]
#
#### Use the winery/grape-wine-liquor conversion table to define a wine description for the records
def setWineryDescrFromWineryGrapeLookup( wgLookup, wines, fldWineDescr = 'winedescr', fldWine = 'wine', fldWineDescrNew = 'winedescrnew', fldWineDescrMatch=False, debug=False ):
if debug:
print('\n'*10,'START WINEDESCR SETTING HERE ---------------------------------------------')
# step through all the records passed in
for rec in wines:
# local variables
winery = grape = wine = vintage = case = size = liquor = nongrape = qual = None
winematchset = grapematchset = []
# debugging
if debug:
try:
print('setWinery:fldWine:', rec[fldWine])
except Exception as e:
print('debug error2-continuing:', str(e))
print('fldWine:', fldWine)
# make the field if it does not exist
if fldWineDescrNew not in rec:
rec[fldWineDescrNew] = rec[fldWineDescr]
### WINERY
(winery, reWinery) = findWinery( rec, None, None, fldWine, debug=debug )
# validate the winery
if winery is None:
### WINERY NONE - go to next record
# debugging
if debug: print('setWinery:winery not found-next record:' + rec[fldWine])
# get the next record
continue
elif winery not in wgLookup:
### WINERY NOT IN LOOKUP
# skip this record - nothing to process
# debugging
if debug: print('setWinery:winery not in wgLookup:', winery)
continue
### GRAPE
# find the grape that is this record
(grape, reGrape) = findGrapeByRegex( rec, fldWine, debug=debug )
# debugging
if debug: print('setWinery:grape found:', grape)
### OVERRIDES
if winery in ignoreGrapeLookup:
### IGNORE GRAPE
# debugging
if debug: print('setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:', winery)
# clear wine and grape
wine = ''
# clear the grape field
grape = None
# set the liquor flag to control processing
nongrape = True
if winery in noGrapeLookup:
### NOGRAPE - WINE
# debugging
if debug: print('setWinery:noGrapeLookup wine check:', winery)
# do the lookup and if a search is a match on None take appropriate action
wine = wineLookupByName( noGrapeLookup[winery], rec[fldWine], [], 'noGrapeLookup', wineAbbrLookup, debug=debug )
# debugging
if debug: print('setWinery:nogrape check:wine:', wine)
# test the value we got back
if wine == '':
# debugging
if debug: print('setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True')
                # the lookup matched None - so we ignore any grape found and blank out the wine
grape = None
wine = ''
nongrape = True
elif wine:
# matched a wine - so clear the grape value
grape = None
# debugging
if debug: print('setWinery:nograpeLookup:wine found - clear grape field')
if wine is None and winery in liquorLookup:
### LIQUOR
# debugging
if debug: print('setWinery:liqourLookup:', winery)
(liquor, reLiquor) = findLiquor( rec, winery, fldWine, debug=debug)
# if we found something update wine to be what we found
if liquor is not None:
wine = liquor
# debugging
if debug: print('setWinery:liquorLookup-match:', liquor)
if not grape and not nongrape and not wine and liquor is None:
            # NO GRAPE - and not connected to noGrapeLookup or liquorLookup
# get the next record
# debugging
if debug: print('setWinery:did not find grape-skipping record:', rec[fldWineDescr])
continue
# debugging
if debug: print('setWinery:pre-vintage found values for wine/liquor:', wine, ':grape:', grape)
### VINTAGE
vintage = findVintage( rec, fldWine, debug=debug )
# debugging
if debug: print('setWinery:vintage:', vintage)
### CASE information
if reCase.search(rec[fldWine]):
case = 'case'
### BOTTLE SIZE - get the size information
for (size, reSize) in sizeLookup:
# debugging
if debug: print('setWinery:sizeLookup:',size)
if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[fldWine]):
# debugging
if debug: print('setWinery:sizeLookup:matched:',reSize)
break
else:
size = None
if debug: print('setWinery:sizeLookup:None-found')
### QUAL for this wine
qual = findQualifier(rec[fldWine], debug=debug)
# debugging
if debug:
try:
print('setWinery:FinalAttributes:', winery, ':', grape, ':', wine, ':', liquor, ':', vintage, ':', case, ':', size, ':', qual, ':', rec[fldWine])
except Exception as e:
print('debug error5-continuing:', str(e))
print('fldWine:', fldWine)
### WINE - ADDITIONAL INFORMATION
if liquor is not None:
# debugging
if debug: print('setWinery:liquor flag set - no additional data needs to be collected')
elif wine is not None:
# debugging
if debug: print('setWinery:wine is not None - do additional lookups:wine:', wine)
# we found a wine / liquor - so see if there are additional attributes
if wine in wgLookup[winery] and wgLookup[winery][wine]:
# debugging
if debug: print('setWinery:lookup winematchset')
# there is one or more additional lookups for this winery/wine
winematchset = findAddAttribWgLookup( rec, winery, wine, fldWine, wineAbbrLookup, None, valueDescr='wine', debug=debug )
else:
                # wine not in wgLookup - nothing to work with
print('setWinery:unable to perform wgLookup on winery:', winery, ':wine:', wine, ':rec-wine:', rec[fldWine])
# debugging
if debug:
try:
print('wgLookup[winery]:', wgLookup[winery])
except Exception as e:
print('debug error3-continuing:', str(e))
print('winery:', winery)
# debugging - wine is not None - what is the final winematchset
if debug: print('setWinery:winematchset:', winematchset)
elif grape is not None:
# debugging
if debug: print('setWinery:grape is not None - do additional lookups:', grape)
# grape was returned (not wine) so do the lookup on grape
if grape in wgLookup[winery] and wgLookup[winery][grape]:
# see if we can create a match based on attributes and the grape
grapematchset = findAddAttribWgLookup( rec, winery, grape, fldWine, wineAbbrLookup, defaultorderlist, valueDescr='grape', debug=debug )
elif grape in wgLookup[winery]:
# do nothing this is a empty set
if debug: print('setWinery:grape match: matching record set is blank - no action required')
else:
                # grape not in wgLookup - nothing to work with
# debugging
print('setWinery:grape NONMATCH:', rec[fldWine])
if debug: print('setWinery:liquor:', liquor, ':wine:', wine, ':grape:', grape, ':wgLookup[winery]:', wgLookup[winery])
# debugging - wine is not None - what is the final grapematchset
if debug: print('setWinery:grapematchset:', grapematchset)
### check the matchsets we got back - if any of them look like vintage values
### remove them from the string and look at up vintage again
if vintage:
newVintageLookupWine = rec[fldWine]
for matchvalue in winematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(matchvalue,'')
if debug: print('setWinery:2nd-vintage:winematchset:wine-name-removal:', matchvalue)
for matchvalue in grapematchset:
if vintage in matchvalue:
newVintageLookupWine = newVintageLookupWine.replace(matchvalue,'')
if debug: print('setWinery:2nd-vintage:grapematchset:wine-name-removal:', matchvalue)
if newVintageLookupWine != rec[fldWine]:
if debug: print('setWinery:2nd-vintage:newVintageLookupWine:', newVintageLookupWine)
newVintage = findVintage( { fldWine : newVintageLookupWine}, fldWine, debug=debug )
if debug: print('setWinery:2nd-vintage:newVintage:', newVintage)
vintage = newVintage
### FINAL WINEDESCR
# create initial value
wineDescr = ''
# if winery starts with a z then we don't have a vintage
if winery.startswith('z'):
vintage = None
# debugging
if debug: print('setWinery:winery starts with z: clear vintage')
        # quick test - is the winematchset already contained in the wine string
if winematchset and ' '.join(winematchset) in wine:
#debugging
if debug: print('setWinery:clearing-winematchset:', winematchset,':is-in-wine:', wine)
winematchset = []
if grapematchset and ' '.join(grapematchset) in grape:
#TODO - work around for single letter matches
if not (len(grapematchset)==1 and len(grapematchset[0])==1):
#debugging
if debug: print('setWinery:clearing-grapematchset:',grapematchset,':is-in-grape:', grape)
grapematchset = []
if grapematchset and size and size in ' '.join(grapematchset):
size = ''
if winematchset and size and size in ' '.join(winematchset):
size = ''
if debug:
print('setWinery:vallist1:', [winery, grape, wine] + grapematchset + winematchset + [vintage, size, qual, case])
print('setWinery:vallist2:', [winery, grape, wine, *grapematchset, *winematchset, vintage, size, qual, case])
# create a list
wdList= []
# step through the values
for val in [winery, grape, wine] + grapematchset + winematchset + [vintage, size, qual, case]:
# and if there is a value add to the list - otherwise skip
if val: wdList.append(val)
# build the wine description by joining all these values together
wineDescr = ' '.join(wdList)
# debugging
if False:
if debug: print('setWinery:wdList:', wdList)
if debug: print('setWinery:wineDescr:', wineDescr)
# debugging
if debug:
try:
print(':'.join(['setWinery:wineDescrList', wineDescr, rec[fldWineDescr], str(wineDescr==rec[fldWineDescr]), rec[fldWine]]) )
except Exception as e:
print('debug error6-continuing:', str(e))
print('fldWine:', fldWine)
        # fill the new value into the record
rec[fldWineDescrNew] = wineDescr
# fill in the matching field
if fldWineDescrMatch:
rec[fldWineDescrMatch] = (rec[fldWineDescr] == rec[fldWineDescrNew])
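# usage sketch (illustrative record - field names are the function defaults):
#   wines = [{'winedescr': '', 'wine': 'Caymus Cabernet Napa 2018 750ml'}]
#   setWineryDescrFromWineryGrapeLookup(wgLookup, wines)
#   # wines[0]['winedescrnew'] is filled in place, e.g. 'Caymus Cab Napa 2018'
#   # (the exact string depends on the real lookup tables)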
#########################################################################################
# set any digit only field to the word passed
def setDigitFld2Value( wines, fld, value, debug=False ):
for rec in wines:
if rec[fld].isdigit():
rec[fld] = value
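# usage sketch: setDigitFld2Value(wines, 'winedescrnew', 'unknown') rewrites any
# record whose winedescrnew is purely numeric (e.g. '12345') to 'unknown'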
#########################################################################################
# validate the field settings match the file we read in for update
def updateFileOptionDictCheck( optiondict, wines, header, debug=False ):
# check to see if the description field is in the file we read in
if optiondict['fldWineDescr'] not in wines[0]:
if debug: print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:', optiondict['fldWineDescr'])
# field needed is not in the record - see if we know what to do
if 'cnt' in wines[0]:
# the cnt field is in the file - so set to that structure
# we will put the updated values into the 'cnt' field
print('setting values fldWineDescr and fldWineDescrNew to: cnt')
# change the field we are updating
optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'
elif 'winedescr' in wines[0]:
# the WineDescr field is in the file - so set to that structure
print('setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew')
# change the field we are updating
optiondict['fldWineDescr'] = 'winedescr'
optiondict['fldWineDescrNew'] = 'winedescrnew'
else:
# no idea - we need to error out
print('could not find fldWineDescr in wines[0]-aborting:', optiondict['fldWineDescr'], '\nwines[0]:', wines[0])
# force the error
error = wines[0][optiondict['fldWineDescr']]
    # determine if we should create the match column (may want to remove this section later)
# removed this logic - require the person to set this field - we will not set it for them.
if False and optiondict['fldWineDescr'] == 'winedescr':
# we are using the file format that is the xref file
# so check to see if we have match enabled
if not optiondict['fldWineDescrMatch']:
# create the default value
optiondict['fldWineDescrMatch'] = 'same'
# provide message
print('setting value fldWineDescrMatch to: same')
# check to see if the input file is the same as the output file
if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:
# they are the same file (in and out) - so we need to move the input file to a backup location
(file_path, base_filename, file_ext) = kvutil.filename_split(optiondict['csvfile_update_in'])
# create the new filename
backupfile = kvutil.filename_proper( base_filename + optiondict['backupfile_ext'], file_path )
# messaging
print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)
# copy the input file to the backup filename
shutil.copyfile(optiondict['csvfile_update_in'], backupfile)
# set the output keys we are going to assign
if optiondict['fldWineDescrNew'] == 'cnt':
# output matches the original ref file format with the "cnt" field
optiondict['csvdictkeys'] = ['cnt','date','search','store','wine','winesrt']
elif optiondict['fldWineDescrMatch']:
# output is a modified xref format so you can look at old and new definitions
# optiondict['csvdictkeys'] = [optiondict['fldWineDescr'],optiondict['fldWineDescrNew'],optiondict['fldWineDescrMatch'], 'date','search','company','wine','winesrt']
optiondict['csvdictkeys'] = [optiondict['fldWineDescr'],optiondict['fldWineDescrNew'],optiondict['fldWineDescrMatch'], *header]
else:
# copy over the read in format
optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:]
# output matches expected input - should really change this to be the format of the read in file
#optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew'], 'date','search','company','wine','winesrt']
print('updateFileOptionDictCheck:set csvdictkeys to:',optiondict['csvdictkeys'])
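# behavior sketch: if the update file has a 'cnt' column, both description fields
# are pointed at 'cnt' and csvdictkeys mirrors the reference-file layout; with a
# 'winedescr' column the new values land in 'winedescrnew'; any other layout aborts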
# ---------------------------------------------------------------------------
if __name__ == '__main__':
# capture the command line
optiondict = kvutil.kv_parse_command_line( optiondictconfig, debug=False )
# set the global debug flag
ppFlag = optiondict['pprint']
# set master fields
setOptionDictMasterFldValues( optiondict, debug=False )
### global variable checks ###
if optiondict['setup_check']:
print('Running global variable check')
globalVariableCheck( debug = optiondict['debug'] )
sys.exit()
# messaging
print('reading in master file:', optiondict['csvfile_master_in'])
# read in the MASTER FILE INPUT file
wines,header = kvcsv.readcsv2list_with_header(optiondict['csvfile_master_in'], headerlc=True)
# build the wine lookup dictionary
wgLookup = buildWineryGrapeLookup( wines, optiondict['fldWineDescrMaster'], optiondict['fldWineMaster'], debug=optiondict['debug'] )
# read in the UPDATE FILE INPUT file - if not updating the master file
if optiondict['csvfile_master_in'] != optiondict['csvfile_update_in']:
# messaging
print('reading in update file:', optiondict['csvfile_update_in'])
# read in the INPUT file
wines,header = kvcsv.readcsv2list_with_header(optiondict['csvfile_update_in'], headerlc=True)
# check to see if we read in any records and if not just return
if not wines:
            print('wineset.py - no records read in - no work to be done - exiting')
sys.exit()
# test to see if we should set the fields based on what we just read in
updateFileOptionDictCheck( optiondict, wines, header, debug=optiondict['debug'] )
# do the assignment of wines to records
setWineryDescrFromWineryGrapeLookup( wgLookup, wines, optiondict['fldWineDescr'], optiondict['fldWine'], optiondict['fldWineDescrNew'], optiondict['fldWineDescrMatch'], debug=optiondict['debug'] )
# if enabled - set all unassigned new descriptions the default value
if optiondict['defaultnew'] is not None:
# message
print('Setting ', optiondict['fldWineDescrNew'], ' to ', optiondict['defaultnew'], 'if not set')
# do the work
setDigitFld2Value( wines, optiondict['fldWineDescrNew'], optiondict['defaultnew'], debug=optiondict['debug'] )
# save the output to the file of interest
kvcsv.writelist2csv( optiondict['csvfile_update_out'], wines, optiondict['csvdictkeys'] )
# messaging
print('Saved results to:', optiondict['csvfile_update_out'])
|
flexible
|
{
"blob_id": "d786e89b9d478dcff3c541c89731247075d078c3",
"index": 678,
"step-1": "<mask token>\n\n\ndef globalVariableCheck(debug=False):\n for liquor in liquorLookup:\n if liquor in noGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'\n , liquor)\n if liquor in ignoreGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'\n , liquor)\n for winery in ignoreGrapeLookup:\n if winery in noGrapeLookup:\n print(\n 'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'\n , winery)\n\n\ndef setOptionDictMasterFldValues(optiondict, debug=False):\n for fld in ('fldWine', 'fldWineDescr'):\n if not optiondict[fld + 'Master']:\n optiondict[fld + 'Master'] = optiondict[fld]\n\n\n<mask token>\n\n\ndef findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):\n if lastWinery:\n if debug:\n try:\n print('fw:new winery:', rec[fldWine])\n except Exception as e:\n print('debug error8-continuing:', str(e))\n print('rec[fldWine]:type:', type(rec[fldWine]))\n print('fw:checking if this is lastWinery:', lastWinery)\n if lastReWinery.search(rec[fldWine]):\n if debug:\n print('fw:this matches the last winery')\n return lastWinery, lastReWinery\n elif debug:\n print('fw:not last winery')\n for winery, reWinery in wineryLookup:\n if debug:\n print('fw:not lastWinery-checking winery:', winery)\n if fldWine not in rec:\n print('not a column in this record fldWine:', fldWine)\n print('rec:', rec)\n if reWinery.search(rec[fldWine]):\n if debug:\n print('fw:winery match found:', winery)\n return winery, reWinery\n return None, None\n\n\n<mask token>\n\n\ndef findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):\n matchLoc = rec[fldWineDescr].find(findStr)\n if matchLoc > -1:\n other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()\n if debug:\n print('fsirro:findStr matched:', findStr)\n if debug:\n print('fsirro:findStr other:', other)\n return findStr, other\n if debug:\n print('fsirro:findStr did not match using:', findStr)\n return None, []\n\n\n<mask token>\n\n\ndef findVintage(rec, fldWine, debug=False):\n for reVintage in vintageLookup:\n m = reVintage.search(rec[fldWine])\n if m:\n if m.group(1):\n vintage = m.group(1)\n if debug:\n print('fv:vintage-match:', reVintage, ':group1')\n elif m.group(2):\n vintage = m.group(2)\n if debug:\n print('fv:vintage-match:', reVintage, ':group2')\n elif m.group(3):\n vintage = m.group(3)\n if debug:\n print('fv:vintage-match:', reVintage, ':group3')\n else:\n vintage = m.group(4)\n if debug:\n print('fv:vintage-match:', reVintage, ':group4')\n return vintage\n return None\n\n\ndef buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',\n debug=False):\n wgLookup = {}\n lastWinery = None\n lastReWinery = None\n for rec in wines:\n if debug:\n print('bwgl:new rec:', rec[fldWineDescr])\n if not fldWineDescr in rec:\n print('creating-field:', fldWineDescr)\n rec[fldWineDescr] = ''\n winery = grape = wine = liquor = None\n other = []\n lastWinery, lastReWinery = winery, reWinery = findWinery(rec,\n lastWinery, lastReWinery, fldWine, debug=debug)\n if not winery:\n if debug:\n print('bwgl:did not find winery-skipping:', rec[fldWine])\n continue\n if winery in ignoreGrapeLookup:\n wine = ''\n if debug:\n print('bwgl:wine check ignoreGrapeLookup on winery:', winery)\n elif winery in noGrapeLookup:\n if debug:\n print('bwgl:wine check noGrapeLookup on winery:', winery)\n wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr\n ], [], 'noGrapeLookup', debug=debug)\n if False 
and wine == '':\n if debug:\n print('bwgl:nograpelookup:no-match:set wine to None')\n wine = None\n elif winery in liquorLookup:\n if debug:\n print('bwgl:liquor check on winery:', winery)\n liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)\n if liquor is not None:\n wine = liquor\n if debug:\n print('bwgl:liquor found and put in wine:', wine)\n if wine is None:\n if debug:\n print('bwgl:grape check because wine is None')\n grape, other = findGrapeByStr(rec, fldWineDescr)\n if debug:\n print('bwgl:grape:', grape, ':other:', other)\n elif debug:\n print('bwgl:grape check skipped - we have a wine')\n if wine is None and grape is None:\n if debug:\n print('bwgl:record skipped - no grape or wine defined')\n continue\n if grape is None:\n if debug:\n print('bwgl:build other from winery')\n wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,\n winery, debug=debug)\n if 'case' in other:\n other.remove('case')\n if debug:\n print('bwgl:remove case from other')\n if other:\n if debug:\n print('bwgl:looking at other for quals, bottlesize and vintage'\n )\n if not other[-1].isdigit():\n for qual, reQual in reQualLookup:\n if qual == other[-1]:\n if debug:\n print('bwgl:remove qualifier from other:', qual)\n del other[-1]\n break\n if other and not other[-1].isdigit():\n for size, reSize in sizeLookup:\n if size == other[-1]:\n if debug:\n print('bwgl:remove bottlesize from other:', size)\n del other[-1]\n break\n if other and other[-1].isdigit():\n if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery\n ] and other[-1] in ignoreGrapeLookup[winery]:\n if debug:\n print(\n 'bwgl:value is in ignoreLookupGrape - keeping it:',\n other[-1])\n else:\n if debug:\n print('bwgl:remove vintage from other:', other[-1])\n del other[-1]\n if wine and wine in other:\n other.remove(wine)\n if debug:\n print('bwgl:remove wine from other:', wine)\n if debug:\n try:\n print('bwgl:Final-Build:', winery, ':', grape, ':', wine,\n ':', liquor, ':', other, ':', rec[fldWineDescr], ':',\n rec[fldWine])\n except Exception as e:\n print('debug error2-continuing:', str(e))\n print('fldWine:', fldWine)\n if grape is None and wine is not None:\n grape = wine\n if debug:\n print('bwgl:set-grape-to-wine:', grape)\n if debug:\n print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)\n if winery not in wgLookup:\n wgLookup[winery] = {grape: []}\n elif grape not in wgLookup[winery]:\n wgLookup[winery][grape] = []\n if other and other not in wgLookup[winery][grape]:\n wgLookup[winery][grape].append(other)\n if debug:\n print('bwgl:appending to wgLookup:other:', other)\n if debug:\n print('bwgl:complete-read-of-master-file:sort wgLookup')\n for winery in wgLookup:\n for grape in wgLookup[winery]:\n wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=\n len, reverse=True)\n if debug:\n print('\\n' * 5)\n print('START WGLOOKUP DUMPED')\n print('#' * 80)\n if ppFlag:\n pp.pprint(wgLookup)\n else:\n print('bwgl:final-wgLookup:\\n', wgLookup)\n print('#' * 80)\n return wgLookup\n\n\ndef findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],\n defaultorderlist=None, valueDescr='', debug=False):\n singlematch = []\n if debug:\n try:\n print('faawl:value:', valueDescr, ':match-wgLookup:', rec[\n fldWine], ':', wgLookup[winery][value])\n except Exception as e:\n print('debug error7-continuing:', str(e))\n print('fldWine:', fldWine)\n for valuematchset in wgLookup[winery][value]:\n if debug:\n print('faawl:testing valuematchset:', valuematchset, ':length:',\n len(valuematchset))\n 
allmatch = True\n for valuematch in valuematchset:\n reMatch1 = re.compile('\\\\b' + valuematch + '\\\\b', re.IGNORECASE)\n reMatch2 = re.compile('\\\\s' + valuematch + '\\\\s', re.IGNORECASE)\n m1 = reMatch1.search(rec[fldWine])\n m2 = reMatch2.search(rec[fldWine])\n if m1 or m2:\n allmatch = True and allmatch\n elif valuematch in AbbrLookup:\n if debug:\n print('faawl:valuematch-abbr:', valuematch, ':',\n wineAbbrLookup[valuematch])\n reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)\n allmatch = reMatch.search(rec[fldWine]) and allmatch\n else:\n allmatch = False and allmatch\n if debug:\n print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)\n if allmatch:\n if debug:\n print('faawl:value matched:', valuematchset)\n if len(valuematchset) == 1:\n if debug:\n print('faawl:single-valuematch-set-added-to-singlematch:',\n valuematchset)\n singlematch.append(valuematchset)\n else:\n if debug:\n print('faawl:multivalue-valuematch-set-found:done')\n return valuematchset\n if not singlematch:\n if debug:\n print('faawl:exit with singlematch NOT populated return blank')\n return []\n if debug:\n print('faawl:exit with singlematch populated:', singlematch)\n if len(singlematch) == 1 or not defaultorderlist:\n if debug:\n print('faawl:return first entry in singlematch:', singlematch[0])\n return singlematch[0]\n defaultorder = defaultorderlist[:]\n if debug:\n print('faawl:multiple single match value-singlematch:', singlematch)\n for val in singlematch[::-1]:\n if val not in defaultorder:\n defaultorder.insert(0, val)\n if winery == 'Mondavi' and ['Tok'] in singlematch:\n if debug:\n print('faawl:Change from:', valuematchset, ':to Tok for mondavi')\n return ['Tok']\n for val in defaultorder:\n if val in singlematch:\n if debug:\n print('faawl:selected-singlematch-value:', val)\n return val\n if debug:\n print('faawl:valuematchset-empty')\n return []\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef globalVariableCheck(debug=False):\n for liquor in liquorLookup:\n if liquor in noGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'\n , liquor)\n if liquor in ignoreGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'\n , liquor)\n for winery in ignoreGrapeLookup:\n if winery in noGrapeLookup:\n print(\n 'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'\n , winery)\n\n\ndef setOptionDictMasterFldValues(optiondict, debug=False):\n for fld in ('fldWine', 'fldWineDescr'):\n if not optiondict[fld + 'Master']:\n optiondict[fld + 'Master'] = optiondict[fld]\n\n\n<mask token>\n\n\ndef findQualifier(wine, debug=False):\n for val, reSearch in reQualLookup:\n if reSearch.search(wine):\n if debug:\n print('findQualifier:matched-returning:', val)\n return val\n if debug:\n print('findQualifier:no-match-returning:', None)\n return None\n\n\ndef findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):\n if lastWinery:\n if debug:\n try:\n print('fw:new winery:', rec[fldWine])\n except Exception as e:\n print('debug error8-continuing:', str(e))\n print('rec[fldWine]:type:', type(rec[fldWine]))\n print('fw:checking if this is lastWinery:', lastWinery)\n if lastReWinery.search(rec[fldWine]):\n if debug:\n print('fw:this matches the last winery')\n return lastWinery, lastReWinery\n elif debug:\n print('fw:not last winery')\n for winery, reWinery in wineryLookup:\n if debug:\n print('fw:not lastWinery-checking winery:', winery)\n if fldWine not in rec:\n print('not a column in this record fldWine:', fldWine)\n print('rec:', rec)\n if reWinery.search(rec[fldWine]):\n if debug:\n print('fw:winery match found:', winery)\n return winery, reWinery\n return None, None\n\n\ndef findLiquor(rec, winery, fldWine, debug=False):\n for liquor, reLiquor in liquorLookup[winery]:\n if debug:\n print('fl:checking liquor:', liquor)\n if reLiquor.search(rec[fldWine]):\n if debug:\n print('fl:liquor match found:', liquor)\n return liquor, reLiquor\n return None, None\n\n\ndef findGrapeByRegex(rec, fldWine, debug=False):\n for grape, reGrape in grapeLookup:\n if debug:\n print('fgbr:grape:', grape)\n if grape is not None and reGrape.search(rec[fldWine]):\n if debug:\n print('fgbr:grape match found:', grape)\n return grape, reGrape\n return None, None\n\n\ndef findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):\n matchLoc = rec[fldWineDescr].find(findStr)\n if matchLoc > -1:\n other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()\n if debug:\n print('fsirro:findStr matched:', findStr)\n if debug:\n print('fsirro:findStr other:', other)\n return findStr, other\n if debug:\n print('fsirro:findStr did not match using:', findStr)\n return None, []\n\n\ndef findGrapeByStr(rec, fldWineDescr, debug=False):\n for grape, reGrape in grapeLookup:\n if debug:\n print('fg:grape:', grape)\n grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,\n debug=debug)\n if grape:\n return grape, other\n return None, []\n\n\ndef findVintage(rec, fldWine, debug=False):\n for reVintage in vintageLookup:\n m = reVintage.search(rec[fldWine])\n if m:\n if m.group(1):\n vintage = m.group(1)\n if debug:\n print('fv:vintage-match:', reVintage, ':group1')\n elif m.group(2):\n vintage = m.group(2)\n if debug:\n print('fv:vintage-match:', reVintage, ':group2')\n elif m.group(3):\n vintage = m.group(3)\n if debug:\n print('fv:vintage-match:', reVintage, 
':group3')\n else:\n vintage = m.group(4)\n if debug:\n print('fv:vintage-match:', reVintage, ':group4')\n return vintage\n return None\n\n\ndef buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',\n debug=False):\n wgLookup = {}\n lastWinery = None\n lastReWinery = None\n for rec in wines:\n if debug:\n print('bwgl:new rec:', rec[fldWineDescr])\n if not fldWineDescr in rec:\n print('creating-field:', fldWineDescr)\n rec[fldWineDescr] = ''\n winery = grape = wine = liquor = None\n other = []\n lastWinery, lastReWinery = winery, reWinery = findWinery(rec,\n lastWinery, lastReWinery, fldWine, debug=debug)\n if not winery:\n if debug:\n print('bwgl:did not find winery-skipping:', rec[fldWine])\n continue\n if winery in ignoreGrapeLookup:\n wine = ''\n if debug:\n print('bwgl:wine check ignoreGrapeLookup on winery:', winery)\n elif winery in noGrapeLookup:\n if debug:\n print('bwgl:wine check noGrapeLookup on winery:', winery)\n wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr\n ], [], 'noGrapeLookup', debug=debug)\n if False and wine == '':\n if debug:\n print('bwgl:nograpelookup:no-match:set wine to None')\n wine = None\n elif winery in liquorLookup:\n if debug:\n print('bwgl:liquor check on winery:', winery)\n liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)\n if liquor is not None:\n wine = liquor\n if debug:\n print('bwgl:liquor found and put in wine:', wine)\n if wine is None:\n if debug:\n print('bwgl:grape check because wine is None')\n grape, other = findGrapeByStr(rec, fldWineDescr)\n if debug:\n print('bwgl:grape:', grape, ':other:', other)\n elif debug:\n print('bwgl:grape check skipped - we have a wine')\n if wine is None and grape is None:\n if debug:\n print('bwgl:record skipped - no grape or wine defined')\n continue\n if grape is None:\n if debug:\n print('bwgl:build other from winery')\n wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,\n winery, debug=debug)\n if 'case' in other:\n other.remove('case')\n if debug:\n print('bwgl:remove case from other')\n if other:\n if debug:\n print('bwgl:looking at other for quals, bottlesize and vintage'\n )\n if not other[-1].isdigit():\n for qual, reQual in reQualLookup:\n if qual == other[-1]:\n if debug:\n print('bwgl:remove qualifier from other:', qual)\n del other[-1]\n break\n if other and not other[-1].isdigit():\n for size, reSize in sizeLookup:\n if size == other[-1]:\n if debug:\n print('bwgl:remove bottlesize from other:', size)\n del other[-1]\n break\n if other and other[-1].isdigit():\n if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery\n ] and other[-1] in ignoreGrapeLookup[winery]:\n if debug:\n print(\n 'bwgl:value is in ignoreLookupGrape - keeping it:',\n other[-1])\n else:\n if debug:\n print('bwgl:remove vintage from other:', other[-1])\n del other[-1]\n if wine and wine in other:\n other.remove(wine)\n if debug:\n print('bwgl:remove wine from other:', wine)\n if debug:\n try:\n print('bwgl:Final-Build:', winery, ':', grape, ':', wine,\n ':', liquor, ':', other, ':', rec[fldWineDescr], ':',\n rec[fldWine])\n except Exception as e:\n print('debug error2-continuing:', str(e))\n print('fldWine:', fldWine)\n if grape is None and wine is not None:\n grape = wine\n if debug:\n print('bwgl:set-grape-to-wine:', grape)\n if debug:\n print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)\n if winery not in wgLookup:\n wgLookup[winery] = {grape: []}\n elif grape not in wgLookup[winery]:\n wgLookup[winery][grape] = []\n if other and other not in 
wgLookup[winery][grape]:\n wgLookup[winery][grape].append(other)\n if debug:\n print('bwgl:appending to wgLookup:other:', other)\n if debug:\n print('bwgl:complete-read-of-master-file:sort wgLookup')\n for winery in wgLookup:\n for grape in wgLookup[winery]:\n wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=\n len, reverse=True)\n if debug:\n print('\\n' * 5)\n print('START WGLOOKUP DUMPED')\n print('#' * 80)\n if ppFlag:\n pp.pprint(wgLookup)\n else:\n print('bwgl:final-wgLookup:\\n', wgLookup)\n print('#' * 80)\n return wgLookup\n\n\ndef findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],\n defaultorderlist=None, valueDescr='', debug=False):\n singlematch = []\n if debug:\n try:\n print('faawl:value:', valueDescr, ':match-wgLookup:', rec[\n fldWine], ':', wgLookup[winery][value])\n except Exception as e:\n print('debug error7-continuing:', str(e))\n print('fldWine:', fldWine)\n for valuematchset in wgLookup[winery][value]:\n if debug:\n print('faawl:testing valuematchset:', valuematchset, ':length:',\n len(valuematchset))\n allmatch = True\n for valuematch in valuematchset:\n reMatch1 = re.compile('\\\\b' + valuematch + '\\\\b', re.IGNORECASE)\n reMatch2 = re.compile('\\\\s' + valuematch + '\\\\s', re.IGNORECASE)\n m1 = reMatch1.search(rec[fldWine])\n m2 = reMatch2.search(rec[fldWine])\n if m1 or m2:\n allmatch = True and allmatch\n elif valuematch in AbbrLookup:\n if debug:\n print('faawl:valuematch-abbr:', valuematch, ':',\n wineAbbrLookup[valuematch])\n reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)\n allmatch = reMatch.search(rec[fldWine]) and allmatch\n else:\n allmatch = False and allmatch\n if debug:\n print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)\n if allmatch:\n if debug:\n print('faawl:value matched:', valuematchset)\n if len(valuematchset) == 1:\n if debug:\n print('faawl:single-valuematch-set-added-to-singlematch:',\n valuematchset)\n singlematch.append(valuematchset)\n else:\n if debug:\n print('faawl:multivalue-valuematch-set-found:done')\n return valuematchset\n if not singlematch:\n if debug:\n print('faawl:exit with singlematch NOT populated return blank')\n return []\n if debug:\n print('faawl:exit with singlematch populated:', singlematch)\n if len(singlematch) == 1 or not defaultorderlist:\n if debug:\n print('faawl:return first entry in singlematch:', singlematch[0])\n return singlematch[0]\n defaultorder = defaultorderlist[:]\n if debug:\n print('faawl:multiple single match value-singlematch:', singlematch)\n for val in singlematch[::-1]:\n if val not in defaultorder:\n defaultorder.insert(0, val)\n if winery == 'Mondavi' and ['Tok'] in singlematch:\n if debug:\n print('faawl:Change from:', valuematchset, ':to Tok for mondavi')\n return ['Tok']\n for val in defaultorder:\n if val in singlematch:\n if debug:\n print('faawl:selected-singlematch-value:', val)\n return val\n if debug:\n print('faawl:valuematchset-empty')\n return []\n\n\n<mask token>\n\n\ndef setDigitFld2Value(wines, fld, value, debug=False):\n for rec in wines:\n if rec[fld].isdigit():\n rec[fld] = value\n\n\ndef updateFileOptionDictCheck(optiondict, wines, header, debug=False):\n if optiondict['fldWineDescr'] not in wines[0]:\n if debug:\n print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'\n , optiondict['fldWineDescr'])\n if 'cnt' in wines[0]:\n print('setting values fldWineDescr and fldWineDescrNew to: cnt')\n optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'\n elif 'winedescr' in wines[0]:\n print(\n 
'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'\n )\n optiondict['fldWineDescr'] = 'winedescr'\n optiondict['fldWineDescrNew'] = 'winedescrnew'\n else:\n print('could not find fldWineDescr in wines[0]-aborting:',\n optiondict['fldWineDescr'], '\\nwines[0]:', wines[0])\n error = wines[0][optiondict['fldWineDescr']]\n if False and optiondict['fldWineDescr'] == 'winedescr':\n if not optiondict['fldWineDescrMatch']:\n optiondict['fldWineDescrMatch'] = 'same'\n print('setting value fldWineDescrMatch to: same')\n if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:\n file_path, base_filename, file_ext = kvutil.filename_split(optiondict\n ['csvfile_update_in'])\n backupfile = kvutil.filename_proper(base_filename + optiondict[\n 'backupfile_ext'], file_path)\n print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)\n shutil.copyfile(optiondict['csvfile_update_in'], backupfile)\n if optiondict['fldWineDescrNew'] == 'cnt':\n optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',\n 'wine', 'winesrt']\n elif optiondict['fldWineDescrMatch']:\n optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict\n ['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]\n else:\n optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:\n ]\n print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[\n 'csvdictkeys'])\n\n\n<mask token>\n",
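A minimal driver sketch for the lookup builder defined in this step. It assumes the definitions above are in scope; the field names ('winedescr', 'wine') and the sample master record are illustrative assumptions only, not part of the stored row:

# sketch only -- assumed field names and sample data
master = [{'winedescr': 'Caymus Cab Napa',
           'wine': '2018 Caymus Napa Valley Cabernet Sauvignon 750ml'}]
wgLookup = buildWineryGrapeLookup(master, fldWineDescr='winedescr', fldWine='wine')
# wgLookup now maps winery -> grape/wine -> ranked attribute match-sets,
# here {'Caymus': {'Cab': [['Napa']]}}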
"step-3": "<mask token>\n\n\ndef globalVariableCheck(debug=False):\n for liquor in liquorLookup:\n if liquor in noGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'\n , liquor)\n if liquor in ignoreGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'\n , liquor)\n for winery in ignoreGrapeLookup:\n if winery in noGrapeLookup:\n print(\n 'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'\n , winery)\n\n\ndef setOptionDictMasterFldValues(optiondict, debug=False):\n for fld in ('fldWine', 'fldWineDescr'):\n if not optiondict[fld + 'Master']:\n optiondict[fld + 'Master'] = optiondict[fld]\n\n\ndef wineLookupByName(nameLookup, lookupStr, other, msg, wineAbbrLookup=None,\n debug=False):\n funcname = 'wineLookupByName:' + msg + ':'\n if debug:\n print(funcname + 'nameLookup:', nameLookup)\n if nameLookup is None:\n if debug:\n print(funcname + 'match: value is none - continue on')\n return ''\n for name in nameLookup:\n if debug:\n print(funcname + 'match-name:', name)\n if name is None:\n if debug:\n print(funcname +\n 'name-matched: value is none - continue on:pass back blank'\n )\n return ''\n reName = re.compile('\\\\b' + name + '\\\\b', re.IGNORECASE)\n if reName.search(lookupStr):\n if debug:\n print(funcname + 'name-MATCHED:', name)\n for val in other:\n if reName.search(val):\n other.remove(val)\n if debug:\n print(funcname + 'name-remove-from-other:', val)\n return name\n if wineAbbrLookup and name in wineAbbrLookup:\n reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)\n if debug:\n print(funcname + 'Abbr-match-name:', name)\n if reName.search(lookupStr):\n if debug:\n print(funcname + 'Abbr-name-MATCHED:', wineAbbrLookup[name]\n )\n for val in other:\n if reName.search(val):\n other.remove(val)\n if debug:\n print(funcname + 'name-remove-from-other:', val)\n return name\n if debug:\n print(funcname + 'name match not found:set to blank')\n return None\n\n\ndef findQualifier(wine, debug=False):\n for val, reSearch in reQualLookup:\n if reSearch.search(wine):\n if debug:\n print('findQualifier:matched-returning:', val)\n return val\n if debug:\n print('findQualifier:no-match-returning:', None)\n return None\n\n\ndef findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):\n if lastWinery:\n if debug:\n try:\n print('fw:new winery:', rec[fldWine])\n except Exception as e:\n print('debug error8-continuing:', str(e))\n print('rec[fldWine]:type:', type(rec[fldWine]))\n print('fw:checking if this is lastWinery:', lastWinery)\n if lastReWinery.search(rec[fldWine]):\n if debug:\n print('fw:this matches the last winery')\n return lastWinery, lastReWinery\n elif debug:\n print('fw:not last winery')\n for winery, reWinery in wineryLookup:\n if debug:\n print('fw:not lastWinery-checking winery:', winery)\n if fldWine not in rec:\n print('not a column in this record fldWine:', fldWine)\n print('rec:', rec)\n if reWinery.search(rec[fldWine]):\n if debug:\n print('fw:winery match found:', winery)\n return winery, reWinery\n return None, None\n\n\ndef findLiquor(rec, winery, fldWine, debug=False):\n for liquor, reLiquor in liquorLookup[winery]:\n if debug:\n print('fl:checking liquor:', liquor)\n if reLiquor.search(rec[fldWine]):\n if debug:\n print('fl:liquor match found:', liquor)\n return liquor, reLiquor\n return None, None\n\n\ndef findGrapeByRegex(rec, fldWine, debug=False):\n for grape, reGrape in grapeLookup:\n if debug:\n print('fgbr:grape:', 
grape)\n if grape is not None and reGrape.search(rec[fldWine]):\n if debug:\n print('fgbr:grape match found:', grape)\n return grape, reGrape\n return None, None\n\n\ndef findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):\n matchLoc = rec[fldWineDescr].find(findStr)\n if matchLoc > -1:\n other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()\n if debug:\n print('fsirro:findStr matched:', findStr)\n if debug:\n print('fsirro:findStr other:', other)\n return findStr, other\n if debug:\n print('fsirro:findStr did not match using:', findStr)\n return None, []\n\n\ndef findGrapeByStr(rec, fldWineDescr, debug=False):\n for grape, reGrape in grapeLookup:\n if debug:\n print('fg:grape:', grape)\n grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,\n debug=debug)\n if grape:\n return grape, other\n return None, []\n\n\ndef findVintage(rec, fldWine, debug=False):\n for reVintage in vintageLookup:\n m = reVintage.search(rec[fldWine])\n if m:\n if m.group(1):\n vintage = m.group(1)\n if debug:\n print('fv:vintage-match:', reVintage, ':group1')\n elif m.group(2):\n vintage = m.group(2)\n if debug:\n print('fv:vintage-match:', reVintage, ':group2')\n elif m.group(3):\n vintage = m.group(3)\n if debug:\n print('fv:vintage-match:', reVintage, ':group3')\n else:\n vintage = m.group(4)\n if debug:\n print('fv:vintage-match:', reVintage, ':group4')\n return vintage\n return None\n\n\ndef buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',\n debug=False):\n wgLookup = {}\n lastWinery = None\n lastReWinery = None\n for rec in wines:\n if debug:\n print('bwgl:new rec:', rec[fldWineDescr])\n if not fldWineDescr in rec:\n print('creating-field:', fldWineDescr)\n rec[fldWineDescr] = ''\n winery = grape = wine = liquor = None\n other = []\n lastWinery, lastReWinery = winery, reWinery = findWinery(rec,\n lastWinery, lastReWinery, fldWine, debug=debug)\n if not winery:\n if debug:\n print('bwgl:did not find winery-skipping:', rec[fldWine])\n continue\n if winery in ignoreGrapeLookup:\n wine = ''\n if debug:\n print('bwgl:wine check ignoreGrapeLookup on winery:', winery)\n elif winery in noGrapeLookup:\n if debug:\n print('bwgl:wine check noGrapeLookup on winery:', winery)\n wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr\n ], [], 'noGrapeLookup', debug=debug)\n if False and wine == '':\n if debug:\n print('bwgl:nograpelookup:no-match:set wine to None')\n wine = None\n elif winery in liquorLookup:\n if debug:\n print('bwgl:liquor check on winery:', winery)\n liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)\n if liquor is not None:\n wine = liquor\n if debug:\n print('bwgl:liquor found and put in wine:', wine)\n if wine is None:\n if debug:\n print('bwgl:grape check because wine is None')\n grape, other = findGrapeByStr(rec, fldWineDescr)\n if debug:\n print('bwgl:grape:', grape, ':other:', other)\n elif debug:\n print('bwgl:grape check skipped - we have a wine')\n if wine is None and grape is None:\n if debug:\n print('bwgl:record skipped - no grape or wine defined')\n continue\n if grape is None:\n if debug:\n print('bwgl:build other from winery')\n wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,\n winery, debug=debug)\n if 'case' in other:\n other.remove('case')\n if debug:\n print('bwgl:remove case from other')\n if other:\n if debug:\n print('bwgl:looking at other for quals, bottlesize and vintage'\n )\n if not other[-1].isdigit():\n for qual, reQual in reQualLookup:\n if qual == other[-1]:\n if debug:\n 
print('bwgl:remove qualifier from other:', qual)\n del other[-1]\n break\n if other and not other[-1].isdigit():\n for size, reSize in sizeLookup:\n if size == other[-1]:\n if debug:\n print('bwgl:remove bottlesize from other:', size)\n del other[-1]\n break\n if other and other[-1].isdigit():\n if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery\n ] and other[-1] in ignoreGrapeLookup[winery]:\n if debug:\n print(\n 'bwgl:value is in ignoreLookupGrape - keeping it:',\n other[-1])\n else:\n if debug:\n print('bwgl:remove vintage from other:', other[-1])\n del other[-1]\n if wine and wine in other:\n other.remove(wine)\n if debug:\n print('bwgl:remove wine from other:', wine)\n if debug:\n try:\n print('bwgl:Final-Build:', winery, ':', grape, ':', wine,\n ':', liquor, ':', other, ':', rec[fldWineDescr], ':',\n rec[fldWine])\n except Exception as e:\n print('debug error2-continuing:', str(e))\n print('fldWine:', fldWine)\n if grape is None and wine is not None:\n grape = wine\n if debug:\n print('bwgl:set-grape-to-wine:', grape)\n if debug:\n print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)\n if winery not in wgLookup:\n wgLookup[winery] = {grape: []}\n elif grape not in wgLookup[winery]:\n wgLookup[winery][grape] = []\n if other and other not in wgLookup[winery][grape]:\n wgLookup[winery][grape].append(other)\n if debug:\n print('bwgl:appending to wgLookup:other:', other)\n if debug:\n print('bwgl:complete-read-of-master-file:sort wgLookup')\n for winery in wgLookup:\n for grape in wgLookup[winery]:\n wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=\n len, reverse=True)\n if debug:\n print('\\n' * 5)\n print('START WGLOOKUP DUMPED')\n print('#' * 80)\n if ppFlag:\n pp.pprint(wgLookup)\n else:\n print('bwgl:final-wgLookup:\\n', wgLookup)\n print('#' * 80)\n return wgLookup\n\n\ndef findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],\n defaultorderlist=None, valueDescr='', debug=False):\n singlematch = []\n if debug:\n try:\n print('faawl:value:', valueDescr, ':match-wgLookup:', rec[\n fldWine], ':', wgLookup[winery][value])\n except Exception as e:\n print('debug error7-continuing:', str(e))\n print('fldWine:', fldWine)\n for valuematchset in wgLookup[winery][value]:\n if debug:\n print('faawl:testing valuematchset:', valuematchset, ':length:',\n len(valuematchset))\n allmatch = True\n for valuematch in valuematchset:\n reMatch1 = re.compile('\\\\b' + valuematch + '\\\\b', re.IGNORECASE)\n reMatch2 = re.compile('\\\\s' + valuematch + '\\\\s', re.IGNORECASE)\n m1 = reMatch1.search(rec[fldWine])\n m2 = reMatch2.search(rec[fldWine])\n if m1 or m2:\n allmatch = True and allmatch\n elif valuematch in AbbrLookup:\n if debug:\n print('faawl:valuematch-abbr:', valuematch, ':',\n wineAbbrLookup[valuematch])\n reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)\n allmatch = reMatch.search(rec[fldWine]) and allmatch\n else:\n allmatch = False and allmatch\n if debug:\n print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)\n if allmatch:\n if debug:\n print('faawl:value matched:', valuematchset)\n if len(valuematchset) == 1:\n if debug:\n print('faawl:single-valuematch-set-added-to-singlematch:',\n valuematchset)\n singlematch.append(valuematchset)\n else:\n if debug:\n print('faawl:multivalue-valuematch-set-found:done')\n return valuematchset\n if not singlematch:\n if debug:\n print('faawl:exit with singlematch NOT populated return blank')\n return []\n if debug:\n print('faawl:exit with singlematch populated:', singlematch)\n 
if len(singlematch) == 1 or not defaultorderlist:\n if debug:\n print('faawl:return first entry in singlematch:', singlematch[0])\n return singlematch[0]\n defaultorder = defaultorderlist[:]\n if debug:\n print('faawl:multiple single match value-singlematch:', singlematch)\n for val in singlematch[::-1]:\n if val not in defaultorder:\n defaultorder.insert(0, val)\n if winery == 'Mondavi' and ['Tok'] in singlematch:\n if debug:\n print('faawl:Change from:', valuematchset, ':to Tok for mondavi')\n return ['Tok']\n for val in defaultorder:\n if val in singlematch:\n if debug:\n print('faawl:selected-singlematch-value:', val)\n return val\n if debug:\n print('faawl:valuematchset-empty')\n return []\n\n\ndef setWineryDescrFromWineryGrapeLookup(wgLookup, wines, fldWineDescr=\n 'winedescr', fldWine='wine', fldWineDescrNew='winedescrnew',\n fldWineDescrMatch=False, debug=False):\n if debug:\n print('\\n' * 10,\n 'START WINEDESCR SETTING HERE ---------------------------------------------'\n )\n for rec in wines:\n (winery) = (grape) = (wine) = (vintage) = (case) = (size) = (liquor\n ) = (nongrape) = (qual) = None\n winematchset = grapematchset = []\n if debug:\n try:\n print('setWinery:fldWine:', rec[fldWine])\n except Exception as e:\n print('debug error2-continuing:', str(e))\n print('fldWine:', fldWine)\n if fldWineDescrNew not in rec:\n rec[fldWineDescrNew] = rec[fldWineDescr]\n winery, reWinery = findWinery(rec, None, None, fldWine, debug=debug)\n if winery is None:\n if debug:\n print('setWinery:winery not found-next record:' + rec[fldWine])\n continue\n elif winery not in wgLookup:\n if debug:\n print('setWinery:winery not in wgLookup:', winery)\n continue\n grape, reGrape = findGrapeByRegex(rec, fldWine, debug=debug)\n if debug:\n print('setWinery:grape found:', grape)\n if winery in ignoreGrapeLookup:\n if debug:\n print(\n 'setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:'\n , winery)\n wine = ''\n grape = None\n nongrape = True\n if winery in noGrapeLookup:\n if debug:\n print('setWinery:noGrapeLookup wine check:', winery)\n wine = wineLookupByName(noGrapeLookup[winery], rec[fldWine], [],\n 'noGrapeLookup', wineAbbrLookup, debug=debug)\n if debug:\n print('setWinery:nogrape check:wine:', wine)\n if wine == '':\n if debug:\n print(\n 'setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True'\n )\n grape = None\n wine = ''\n nongrape = True\n elif wine:\n grape = None\n if debug:\n print(\n 'setWinery:nograpeLookup:wine found - clear grape field'\n )\n if wine is None and winery in liquorLookup:\n if debug:\n print('setWinery:liqourLookup:', winery)\n liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)\n if liquor is not None:\n wine = liquor\n if debug:\n print('setWinery:liquorLookup-match:', liquor)\n if not grape and not nongrape and not wine and liquor is None:\n if debug:\n print('setWinery:did not find grape-skipping record:', rec[\n fldWineDescr])\n continue\n if debug:\n print('setWinery:pre-vintage found values for wine/liquor:',\n wine, ':grape:', grape)\n vintage = findVintage(rec, fldWine, debug=debug)\n if debug:\n print('setWinery:vintage:', vintage)\n if reCase.search(rec[fldWine]):\n case = 'case'\n for size, reSize in sizeLookup:\n if debug:\n print('setWinery:sizeLookup:', size)\n if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[\n fldWine]):\n if debug:\n print('setWinery:sizeLookup:matched:', reSize)\n break\n else:\n size = None\n if debug:\n print('setWinery:sizeLookup:None-found')\n qual 
= findQualifier(rec[fldWine], debug=debug)\n if debug:\n try:\n print('setWinery:FinalAttributes:', winery, ':', grape, ':',\n wine, ':', liquor, ':', vintage, ':', case, ':', size,\n ':', qual, ':', rec[fldWine])\n except Exception as e:\n print('debug error5-continuing:', str(e))\n print('fldWine:', fldWine)\n if liquor is not None:\n if debug:\n print(\n 'setWinery:liquor flag set - no additional data needs to be collected'\n )\n elif wine is not None:\n if debug:\n print(\n 'setWinery:wine is not None - do additional lookups:wine:',\n wine)\n if wine in wgLookup[winery] and wgLookup[winery][wine]:\n if debug:\n print('setWinery:lookup winematchset')\n winematchset = findAddAttribWgLookup(rec, winery, wine,\n fldWine, wineAbbrLookup, None, valueDescr='wine', debug\n =debug)\n else:\n print('setWinery:unable to perform wgLookup on winery:',\n winery, ':wine:', wine, ':rec-wine:', rec[fldWine])\n if debug:\n try:\n print('wgLookup[winery]:', wgLookup[winery])\n except Exception as e:\n print('debug error3-continuing:', str(e))\n print('winery:', winery)\n if debug:\n print('setWinery:winematchset:', winematchset)\n elif grape is not None:\n if debug:\n print('setWinery:grape is not None - do additional lookups:',\n grape)\n if grape in wgLookup[winery] and wgLookup[winery][grape]:\n grapematchset = findAddAttribWgLookup(rec, winery, grape,\n fldWine, wineAbbrLookup, defaultorderlist, valueDescr=\n 'grape', debug=debug)\n elif grape in wgLookup[winery]:\n if debug:\n print(\n 'setWinery:grape match: matching record set is blank - no action required'\n )\n else:\n print('setWinery:grape NONMATCH:', rec[fldWine])\n if debug:\n print('setWinery:liquor:', liquor, ':wine:', wine,\n ':grape:', grape, ':wgLookup[winery]:', wgLookup[\n winery])\n if debug:\n print('setWinery:grapematchset:', grapematchset)\n if vintage:\n newVintageLookupWine = rec[fldWine]\n for matchvalue in winematchset:\n if vintage in matchvalue:\n newVintageLookupWine = newVintageLookupWine.replace(\n matchvalue, '')\n if debug:\n print(\n 'setWinery:2nd-vintage:winematchset:wine-name-removal:'\n , matchvalue)\n for matchvalue in grapematchset:\n if vintage in matchvalue:\n newVintageLookupWine = newVintageLookupWine.replace(\n matchvalue, '')\n if debug:\n print(\n 'setWinery:2nd-vintage:grapematchset:wine-name-removal:'\n , matchvalue)\n if newVintageLookupWine != rec[fldWine]:\n if debug:\n print('setWinery:2nd-vintage:newVintageLookupWine:',\n newVintageLookupWine)\n newVintage = findVintage({fldWine: newVintageLookupWine},\n fldWine, debug=debug)\n if debug:\n print('setWinery:2nd-vintage:newVintage:', newVintage)\n vintage = newVintage\n wineDescr = ''\n if winery.startswith('z'):\n vintage = None\n if debug:\n print('setWinery:winery starts with z: clear vintage')\n if winematchset and ' '.join(winematchset) in wine:\n if debug:\n print('setWinery:clearing-winematchset:', winematchset,\n ':is-in-wine:', wine)\n winematchset = []\n if grapematchset and ' '.join(grapematchset) in grape:\n if not (len(grapematchset) == 1 and len(grapematchset[0]) == 1):\n if debug:\n print('setWinery:clearing-grapematchset:',\n grapematchset, ':is-in-grape:', grape)\n grapematchset = []\n if grapematchset and size and size in ' '.join(grapematchset):\n size = ''\n if winematchset and size and size in ' '.join(winematchset):\n size = ''\n if debug:\n print('setWinery:vallist1:', [winery, grape, wine] +\n grapematchset + winematchset + [vintage, size, qual, case])\n print('setWinery:vallist2:', [winery, grape, wine, *\n grapematchset, 
*winematchset, vintage, size, qual, case])\n wdList = []\n for val in ([winery, grape, wine] + grapematchset + winematchset +\n [vintage, size, qual, case]):\n if val:\n wdList.append(val)\n wineDescr = ' '.join(wdList)\n if False:\n if debug:\n print('setWinery:wdList:', wdList)\n if debug:\n print('setWinery:wineDescr:', wineDescr)\n if debug:\n try:\n print(':'.join(['setWinery:wineDescrList', wineDescr, rec[\n fldWineDescr], str(wineDescr == rec[fldWineDescr]), rec\n [fldWine]]))\n except Exception as e:\n print('debug error6-continuing:', str(e))\n print('fldWine:', fldWine)\n rec[fldWineDescrNew] = wineDescr\n if fldWineDescrMatch:\n rec[fldWineDescrMatch] = rec[fldWineDescr] == rec[fldWineDescrNew]\n\n\ndef setDigitFld2Value(wines, fld, value, debug=False):\n for rec in wines:\n if rec[fld].isdigit():\n rec[fld] = value\n\n\ndef updateFileOptionDictCheck(optiondict, wines, header, debug=False):\n if optiondict['fldWineDescr'] not in wines[0]:\n if debug:\n print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'\n , optiondict['fldWineDescr'])\n if 'cnt' in wines[0]:\n print('setting values fldWineDescr and fldWineDescrNew to: cnt')\n optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'\n elif 'winedescr' in wines[0]:\n print(\n 'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'\n )\n optiondict['fldWineDescr'] = 'winedescr'\n optiondict['fldWineDescrNew'] = 'winedescrnew'\n else:\n print('could not find fldWineDescr in wines[0]-aborting:',\n optiondict['fldWineDescr'], '\\nwines[0]:', wines[0])\n error = wines[0][optiondict['fldWineDescr']]\n if False and optiondict['fldWineDescr'] == 'winedescr':\n if not optiondict['fldWineDescrMatch']:\n optiondict['fldWineDescrMatch'] = 'same'\n print('setting value fldWineDescrMatch to: same')\n if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:\n file_path, base_filename, file_ext = kvutil.filename_split(optiondict\n ['csvfile_update_in'])\n backupfile = kvutil.filename_proper(base_filename + optiondict[\n 'backupfile_ext'], file_path)\n print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)\n shutil.copyfile(optiondict['csvfile_update_in'], backupfile)\n if optiondict['fldWineDescrNew'] == 'cnt':\n optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',\n 'wine', 'winesrt']\n elif optiondict['fldWineDescrMatch']:\n optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict\n ['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]\n else:\n optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:\n ]\n print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[\n 'csvdictkeys'])\n\n\n<mask token>\n",
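A companion sketch for the description-rewrite pass defined in this step, reusing the lookup from the earlier sketch; the field names and the inventory record are again illustrative assumptions, and the exact rebuilt description depends on the lookup contents:

# sketch only -- assumed field names and sample data
wines = [{'winedescr': '', 'wine': 'Caymus Napa Valley Cabernet Sauvignon 750ml 2018'}]
setWineryDescrFromWineryGrapeLookup(wgLookup, wines, fldWineDescr='winedescr',
    fldWine='wine', fldWineDescrNew='winedescrnew')
# each record gets rec['winedescrnew'] rebuilt as
# winery + grape/wine + matched attributes + vintage/size/qualifier,
# e.g. 'Caymus Cab Napa 18' (the vintage regexes keep the last two digits)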
"step-4": "<mask token>\nimport kvutil\nimport kvcsv\nimport re\nimport sys\nimport shutil\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\nppFlag = False\noptiondictconfig = {'AppVersion': {'value': '1.13', 'description':\n 'defines the version number for the app'}, 'debug': {'value': False,\n 'type': 'bool', 'description':\n 'defines if we are running in debug mode'}, 'verbose': {'value': 1,\n 'type': 'int', 'description':\n 'defines the display level for print messages'}, 'setup_check': {\n 'value': False, 'type': 'bool', 'description':\n 'defines if we checking out setup'}, 'pprint': {'value': False, 'type':\n 'bool', 'description':\n 'defines if we output with pretty print when debugging'},\n 'csvfile_master_in': {'value': 'wine_xref.csv', 'description':\n 'defines the name of the master data input file'}, 'csvfile_update_in':\n {'value': 'wineref.csv', 'description':\n 'defines the name of the input file to updated'}, 'csvfile_update_out':\n {'value': 'wineref2.csv', 'description':\n 'defines the name of the updated output file'}, 'fldWine': {'value':\n 'wine', 'description':\n 'defines the name of the field that holds the Wine '}, 'fldWineDescr':\n {'value': 'winedescr', 'description':\n 'defines the name of the field holding the wine description'},\n 'fldWineDescrNew': {'value': 'winedescrnew', 'description':\n 'defines the name of the NEW field holding the new description '},\n 'fldWineDescrMatch': {'value': None, 'description':\n 'defines the name of the NEW field holding the results of comparison existing to new description '\n }, 'fldWineMaster': {'value': None, 'description':\n 'defines the name of the field that holds the Wine when reading the master file '\n }, 'fldWineDescrMaster': {'value': None, 'description':\n 'defines the name of the field holding the wine description when reading the master file'\n }, 'backupfile_ext': {'value': '.bak', 'description':\n 'defines the extension to use to copy the update input file to if we are replacing it with output'\n }, 'defaultnew': {'value': None, 'description':\n 'defines if we should take field fldWineDescrNew and set to a value if not set'\n }}\nvintageLookup = re.compile('\\\\d\\\\d\\\\d\\\\d\\\\s+\\\\d\\\\d(\\\\d\\\\d)'), re.compile(\n '^\\\\d\\\\d(\\\\d\\\\d)'), re.compile('\\\\s\\\\d\\\\d(\\\\d\\\\d)$'), re.compile(\n '\\\\s\\\\d\\\\d(\\\\d\\\\d)\\\\s'), re.compile('XX\\\\d\\\\d(\\\\d\\\\d)\\\\s'), re.compile(\n '\\\\s\\\\d\\\\d(\\\\d\\\\d)\\\\/'), re.compile(\"\\\\s'?(\\\\d\\\\d)'?$|\\\\s'?(\\\\d\\\\d)'?\\\\s\")\nreCase = re.compile('12\\\\s*X\\\\s*750\\\\s*ML|\\\\bcase\\\\b|12\\\\/750\\\\s*ML', re.\n IGNORECASE)\nreQualLookup = (None, re.compile('\\\\bWithout\\\\s+Gift\\\\b|\\\\bNo\\\\s+Gift', re.\n IGNORECASE)), ('Gift', re.compile('\\\\bGift\\\\b', re.IGNORECASE)), ('VAP',\n re.compile('\\\\bVAP\\\\b', re.IGNORECASE)), ('VAP', re.compile(\n '\\\\bGlassVAP\\\\b', re.IGNORECASE)), ('Glass', re.compile('\\\\bGlass\\\\b',\n re.IGNORECASE)), ('Glass', re.compile('\\\\bGlasses\\\\b', re.IGNORECASE)), (\n 'Etch', re.compile('\\\\bEtch\\\\b', re.IGNORECASE)), ('Basket', re.compile\n ('\\\\bBasket\\\\b', re.IGNORECASE))\nsizeLookup = ('1.75L', re.compile('\\\\b1\\\\.75\\\\s*Li?|\\\\b1\\\\.75$', re.IGNORECASE)\n ), ('1.5L', re.compile('\\\\b1\\\\.5\\\\s*L?\\\\b|\\\\bMagnum\\\\b', re.IGNORECASE)), (\n '375mL', re.compile('Half\\\\s+Bottle|375ml', re.IGNORECASE)), ('200mL',\n re.compile('\\\\b200\\\\s*ML|\\\\(200\\\\s*ML', re.IGNORECASE)), ('50mL', re.\n compile('\\\\b50\\\\s*ML|\\\\(50\\\\s*ML', re.IGNORECASE)), ('500mL', 
re.\n compile('\\\\b500\\\\s*ML|\\\\(500\\\\s*ML', re.IGNORECASE)), ('3L', re.compile\n ('\\\\b3\\\\s*Li?', re.IGNORECASE)), ('6L', re.compile('\\\\b6\\\\s*Li?', re.\n IGNORECASE)), ('9L', re.compile('\\\\b9\\\\s*Li?', re.IGNORECASE)), ('1L',\n re.compile(\n '\\\\b1L\\\\b|\\\\b1\\\\s+L$|\\\\b1.0\\\\s*L\\\\b|\\\\b1\\\\s+Liter\\\\b|\\\\bOne\\\\s+Liter\\\\b|\\\\bLITER\\\\b|\\\\b1\\\\s*LTR'\n , re.IGNORECASE))\nwineryLookup = ('Alban', re.compile('\\\\bAlban\\\\b', re.IGNORECASE)), ('Arrowood'\n , re.compile('\\\\bArrowood\\\\b', re.IGNORECASE)), ('Atalon', re.compile(\n '\\\\bAtalon\\\\b', re.IGNORECASE)), ('Attune', re.compile('\\\\bAttune\\\\b',\n re.IGNORECASE)), ('Auteur', re.compile('\\\\bAuteur\\\\b', re.IGNORECASE)), (\n 'Austin Hope', re.compile('\\\\bAustin\\\\s+Hope\\\\b', re.IGNORECASE)), ('Badge'\n , re.compile('\\\\bBadge\\\\b', re.IGNORECASE)), ('Balletto', re.compile(\n '\\\\bBalletto\\\\b', re.IGNORECASE)), ('Bell', re.compile(\n '\\\\bBell\\\\s+Cellar', re.IGNORECASE)), ('BR Cohn', re.compile(\n '\\\\bB\\\\.?\\\\s?R\\\\.?\\\\s+Cohn\\\\b', re.IGNORECASE)), ('Bremer', re.compile(\n '\\\\bBremer\\\\b', re.IGNORECASE)), ('Brewer-Clifton', re.compile(\n '\\\\bBrewer[\\\\s\\\\-]Clifton\\\\b', re.IGNORECASE)), ('BV', re.compile(\n '\\\\bBeaulieu\\\\s+V|\\\\bBV\\\\b', re.IGNORECASE)), ('Belle Glos', re.compile\n ('\\\\bBelle\\\\s+Glos\\\\b', re.IGNORECASE)), ('Bennett Ln', re.compile(\n '\\\\bBennet+\\\\sLane\\\\b', re.IGNORECASE)), ('Benovia', re.compile(\n '\\\\bBenovia\\\\b', re.IGNORECASE)), ('Beringer', re.compile(\n '\\\\bBeringer\\\\b', re.IGNORECASE)), ('Blackstone', re.compile(\n '\\\\bBlackstone\\\\b', re.IGNORECASE)), ('Brancott', re.compile(\n '\\\\bBrancott\\\\b', re.IGNORECASE)), ('Cade', re.compile('\\\\bCade\\\\b', re\n .IGNORECASE)), ('Cain Five', re.compile(\n '\\\\bCain\\\\s+Five\\\\b|\\\\bCain\\\\s-\\\\sFive\\\\b|\\\\bCain\\\\s5\\\\b|\\\\bCainFive\\\\b',\n re.IGNORECASE)), ('Cakebread', re.compile('\\\\bCakebread\\\\b', re.IGNORECASE)\n ), ('Cardinale', re.compile('\\\\bCardinale\\\\b', re.IGNORECASE)), ('Caymus',\n re.compile('\\\\bCaymus\\\\b', re.IGNORECASE)), ('Chappellet', re.compile(\n '\\\\bChappellet\\\\b', re.IGNORECASE)), ('Chalk Hill', re.compile(\n '\\\\bChalk\\\\s+Hill\\\\b', re.IGNORECASE)), ('Clos Du Bois', re.compile(\n '\\\\bClos\\\\s+Du\\\\s+Bois\\\\b', re.IGNORECASE)), ('ClosDuVal', re.compile(\n '\\\\bClos\\\\s+du\\\\s+Val\\\\b', re.IGNORECASE)), ('Colgin', re.compile(\n '\\\\bColgin\\\\b', re.IGNORECASE)), ('Concha Don Melchor', re.compile(\n '\\\\bConcha\\\\s.*Don\\\\s+Melchor\\\\b|Don\\\\s+Melchor\\\\b', re.IGNORECASE)), (\n 'Continuum', re.compile('\\\\bContinuum\\\\b', re.IGNORECASE)), ('Corison',\n re.compile('\\\\bCorison\\\\b', re.IGNORECASE)), ('Cristal', re.compile(\n 'Roederer\\\\s?.*Cristal\\\\b|\\\\bCristal\\\\b.+Brut', re.IGNORECASE)), ('Curran',\n re.compile('\\\\bCurran\\\\b', re.IGNORECASE)), ('Darioush', re.compile(\n '\\\\bDarioush\\\\b', re.IGNORECASE)), ('Darioush', re.compile(\n '\\\\bCaravan\\\\b', re.IGNORECASE)), ('David Arthur', re.compile(\n '\\\\bDavid\\\\s+Arthur\\\\b', re.IGNORECASE)), ('David Bruce', re.compile(\n '\\\\bDavid\\\\s+Bruce\\\\b', re.IGNORECASE)), ('Davis Family', re.compile(\n '\\\\bDavis\\\\s+Family\\\\b', re.IGNORECASE)), ('Del Dotto', re.compile(\n '\\\\bDel\\\\s+Dotto\\\\b', re.IGNORECASE)), ('Dominus', re.compile(\n '\\\\bDominus\\\\b', re.IGNORECASE)), ('Goldeneye', re.compile(\n '\\\\bGoldeneye\\\\b', re.IGNORECASE)), ('Paraduxx', re.compile(\n '\\\\bParaduxx\\\\b', re.IGNORECASE)), ('Domaine 
Carneros', re.compile(\n '\\\\bDomaine\\\\s+Carneros\\\\b', re.IGNORECASE)), ('Dominus', re.compile(\n '\\\\bDominus\\\\b', re.IGNORECASE)), ('Drappier', re.compile(\n '\\\\bDrappier\\\\b', re.IGNORECASE)), ('Duckhorn', re.compile(\n '\\\\bDuckhorn\\\\b', re.IGNORECASE)), ('Dumol', re.compile('\\\\bDumol\\\\b',\n re.IGNORECASE)), ('Dunn', re.compile('\\\\bDunn\\\\b', re.IGNORECASE)), (\n 'Ehlers', re.compile('\\\\bEhlers\\\\b', re.IGNORECASE)), ('Etude', re.\n compile('\\\\bEtude\\\\b', re.IGNORECASE)), ('Far Niente', re.compile(\n '\\\\bFar Niente\\\\b', re.IGNORECASE)), ('Flora', re.compile(\n '\\\\bFlora\\\\s+Springs\\\\b', re.IGNORECASE)), ('Flowers', re.compile(\n '\\\\bFlowers\\\\b', re.IGNORECASE)), ('Robert Foley', re.compile(\n '\\\\bRobert\\\\s+\\\\bFoley\\\\b', re.IGNORECASE)), ('Foley', re.compile(\n '\\\\bFoley\\\\b', re.IGNORECASE)), ('Foxen', re.compile('\\\\bFoxen\\\\b', re.\n IGNORECASE)), ('Franciscan', re.compile('\\\\bFranciscan\\\\b', re.IGNORECASE)\n ), ('Frank Family', re.compile('\\\\bFrank Family\\\\b', re.IGNORECASE)), (\n 'Gary Farrell', re.compile('\\\\bGary\\\\s+Farrel+\\\\b', re.IGNORECASE)), (\n 'Ghost Block', re.compile('\\\\bGhost\\\\s+Block\\\\b', re.IGNORECASE)), (\n 'Grgich', re.compile('\\\\bGrgich\\\\b', re.IGNORECASE)), ('Groth', re.\n compile('\\\\bGroth\\\\b', re.IGNORECASE)), ('Gundlach', re.compile(\n '\\\\bGundlach\\\\b', re.IGNORECASE)), ('Hansel', re.compile('\\\\bHansel\\\\b',\n re.IGNORECASE)), ('Hanzell', re.compile('\\\\bHanzell\\\\b', re.IGNORECASE)), (\n 'Hess', re.compile('\\\\bHess\\\\b', re.IGNORECASE)), ('Hewitt', re.compile\n ('\\\\bHewitt\\\\b', re.IGNORECASE)), ('Hobbs', re.compile(\n '\\\\bHobbs\\\\b|\\\\bcrossbarn\\\\b', re.IGNORECASE)), ('Hundred Acre', re.\n compile('\\\\bHundred\\\\s+Acre\\\\b', re.IGNORECASE)), ('Jordan', re.compile\n ('\\\\bJordan\\\\b', re.IGNORECASE)), ('Justin', re.compile('\\\\bJustin\\\\b',\n re.IGNORECASE)), ('Kim Crawford', re.compile('\\\\bKim\\\\s+Crawford\\\\b',\n re.IGNORECASE)), ('Kistler', re.compile('\\\\bKistler\\\\b', re.IGNORECASE)), (\n 'Kosta', re.compile('\\\\bKosta\\\\s+Browne?\\\\b', re.IGNORECASE)), ('Krug',\n re.compile('\\\\bKrug\\\\b', re.IGNORECASE)), ('Kunde', re.compile(\n '\\\\bKunde\\\\b', re.IGNORECASE)), ('LaCrema', re.compile(\n '\\\\bLa\\\\s?Crema\\\\b', re.IGNORECASE)), ('Lewis', re.compile(\n '\\\\bLewis\\\\b', re.IGNORECASE)), ('Lokoya', re.compile('\\\\bLokoya\\\\b',\n re.IGNORECASE)), ('Meiomi', re.compile('\\\\bMeiomi\\\\b', re.IGNORECASE)), (\n 'Melville', re.compile('\\\\bMelville\\\\b', re.IGNORECASE)), ('Momento Mori',\n re.compile('\\\\bMomento\\\\s+Mori\\\\b', re.IGNORECASE)), ('Mondavi', re.\n compile('\\\\bMondavi\\\\b', re.IGNORECASE)), ('Montelena', re.compile(\n '\\\\bMontelena\\\\b', re.IGNORECASE)), ('Mt Veeder', re.compile(\n '^Mount\\\\s+Veeder\\\\b|^Mt\\\\.? 
Veeder\\\\b|\\\\d+\\\\s+M[^t]*t\\\\s+Veeder\\\\b',\n re.IGNORECASE)), ('Newton', re.compile('\\\\bNewton\\\\b', re.IGNORECASE)), (\n 'Nickel', re.compile('\\\\bNickel\\\\b', re.IGNORECASE)), ('Opus One', re.\n compile('\\\\bOpus\\\\s+One\\\\b', re.IGNORECASE)), ('P Togni', re.compile(\n '\\\\bTogni\\\\b', re.IGNORECASE)), ('Pahlmeyer Jayson', re.compile(\n '\\\\bJayson\\\\b', re.IGNORECASE)), ('Pahlmeyer', re.compile(\n '\\\\bPahlmeyer\\\\b(?!\\\\s*Jay)', re.IGNORECASE)), ('Papillon', re.compile(\n '\\\\bPapillon\\\\b', re.IGNORECASE)), ('Patz', re.compile('\\\\bPatz\\\\b', re\n .IGNORECASE)), ('Phelps', re.compile('\\\\bPhelps\\\\b', re.IGNORECASE)), (\n 'Plumpjack', re.compile('\\\\bPlumpjack\\\\b', re.IGNORECASE)), ('Pride',\n re.compile('\\\\bPride\\\\b', re.IGNORECASE)), ('Prisoner', re.compile(\n '\\\\bPrisoner\\\\b', re.IGNORECASE)), ('Provenance', re.compile(\n '\\\\bProvenance\\\\b', re.IGNORECASE)), ('R Sinskey', re.compile(\n '\\\\bSinskey\\\\b', re.IGNORECASE)), ('Ramey', re.compile('\\\\bRamey\\\\b',\n re.IGNORECASE)), ('Revana', re.compile('\\\\bRevana\\\\b', re.IGNORECASE)), (\n 'Raptor', re.compile('\\\\bRaptor\\\\s+Ridge\\\\b', re.IGNORECASE)), ('Revana',\n re.compile('\\\\bRevana\\\\b', re.IGNORECASE)), ('Ridge', re.compile(\n '\\\\bRidge\\\\b', re.IGNORECASE)), ('Robert Foley', re.compile(\n '\\\\bRobert\\\\s+Foley\\\\b', re.IGNORECASE)), ('Rombauer', re.compile(\n '\\\\bRombauer\\\\b', re.IGNORECASE)), ('Rudd', re.compile('\\\\bRudd\\\\b', re\n .IGNORECASE)), ('Scarecrow', re.compile('\\\\bScarecrow\\\\b', re.IGNORECASE)\n ), ('Sea Smoke', re.compile('\\\\bSea\\\\s+Smoke\\\\b', re.IGNORECASE)), (\n 'Seghesio', re.compile('\\\\bSeghesio\\\\b', re.IGNORECASE)), ('Shafer', re\n .compile('\\\\bShafer\\\\b', re.IGNORECASE)), ('Sherwin', re.compile(\n '\\\\bSherwin\\\\b', re.IGNORECASE)), ('Silver Oak', re.compile(\n '\\\\bSilver\\\\s+Oak\\\\b', re.IGNORECASE)), ('Silverado', re.compile(\n '\\\\bSilverado\\\\b', re.IGNORECASE)), ('Simi', re.compile('\\\\bSimi\\\\b',\n re.IGNORECASE)), ('Sonoma Cutrer', re.compile('\\\\bCutrer\\\\b', re.\n IGNORECASE)), ('Spottswoode', re.compile('\\\\bSpottswoode\\\\b', re.\n IGNORECASE)), ('Stag Leap', re.compile('\\\\bStag.*\\\\sLeap\\\\b', re.\n IGNORECASE)), ('Sullivan', re.compile('\\\\bSullivan\\\\b', re.IGNORECASE)), (\n 'Summerland', re.compile('\\\\bSummerland\\\\b', re.IGNORECASE)), ('Summers',\n re.compile('\\\\bSummers\\\\b', re.IGNORECASE)), ('Tantara', re.compile(\n '\\\\bTantara\\\\b', re.IGNORECASE)), ('Turnbull', re.compile(\n '\\\\bTurnbull\\\\b', re.IGNORECASE)), ('Veuve', re.compile('\\\\bVeuve\\\\b',\n re.IGNORECASE)), ('Viader', re.compile('\\\\bViader\\\\b', re.IGNORECASE)), (\n 'Waterstone', re.compile('\\\\bWaterstone\\\\b', re.IGNORECASE)), ('Whitehall',\n re.compile('\\\\bWhitehall\\\\b', re.IGNORECASE)), ('Wm Selyem', re.compile\n ('\\\\bWilliams\\\\s*\\\\-?Selyem\\\\b', re.IGNORECASE)), ('ZD', re.compile(\n '\\\\bZD\\\\b', re.IGNORECASE)), ('Zaca', re.compile('\\\\bZaca\\\\b', re.\n IGNORECASE)), ('zBourbon Woodford Res', re.compile(\n '\\\\bWoodford\\\\s+Reserve\\\\b', re.IGNORECASE)), ('zBourbon Woodford Res',\n re.compile('\\\\bWoodford\\\\s+Rsv\\\\b', re.IGNORECASE)), ('zCognac Courvoisier'\n , re.compile('\\\\bCourvoisier\\\\b', re.IGNORECASE)), ('zCognac Hennessy',\n re.compile('\\\\bHennesse?y\\\\b', re.IGNORECASE)), ('zCognac Remy', re.\n compile('\\\\bRemy\\\\s+Martin\\\\b|\\\\bRemy\\\\s+Louis', re.IGNORECASE)), (\n 'zCointreau', re.compile('\\\\bCointreau\\\\b', re.IGNORECASE)), (\n 'zGin Hendrick', 
re.compile('\\\\bHendrick', re.IGNORECASE)), (\n 'zGin Tanqueray', re.compile('\\\\bTanqueray\\\\b', re.IGNORECASE)), (\n 'zRum Mt Gay', re.compile('\\\\bMount\\\\s+Gay\\\\b|\\\\bMt\\\\s+Gay', re.IGNORECASE)\n ), ('zRum Ron Zacapa', re.compile('\\\\bRon\\\\s+Zacapa\\\\b', re.IGNORECASE)), (\n 'zRye Hayden', re.compile('\\\\bBasil\\\\s+Hayden\\\\b', re.IGNORECASE)), (\n 'zSambuca', re.compile('\\\\bSambuca\\\\b', re.IGNORECASE)), (\n 'zScotch Glenmorangie', re.compile('\\\\bGlenmorangie\\\\b', re.IGNORECASE)), (\n 'zScotch Hibiki Harmony', re.compile('\\\\bHibiki\\\\s.*Harmony\\\\b', re.\n IGNORECASE)), ('zScotch Hibiki', re.compile('\\\\bHibiki\\\\b(?!\\\\s*Har)',\n re.IGNORECASE)), ('zScotch Macallan', re.compile('\\\\bMacallan\\\\b', re.\n IGNORECASE)), ('zTeq Campo Azul', re.compile('\\\\bCampo\\\\s+Azul\\\\b', re.\n IGNORECASE)), ('zTeq Casamigos', re.compile('\\\\bCasamigos\\\\b', re.\n IGNORECASE)), ('zTeq Casino Azul', re.compile('\\\\bCasino\\\\s+Azul\\\\b',\n re.IGNORECASE)), ('zTeq Clase Azul', re.compile('\\\\bClase\\\\s+Azul\\\\b',\n re.IGNORECASE)), ('zTeq Cuervo', re.compile(\n '\\\\bJose\\\\s+Cuervo\\\\b|^Cuervo\\\\b', re.IGNORECASE)), ('zTeq Don Julio',\n re.compile('\\\\bDon\\\\s+Julio\\\\b', re.IGNORECASE)), ('zTeq Dos Artes', re\n .compile('\\\\bDos\\\\s+Artes\\\\b|^Cuervo\\\\b', re.IGNORECASE)), (\n 'zTeq Gran Cava', re.compile('\\\\bGran\\\\s+Cava\\\\b', re.IGNORECASE)), (\n 'zTeq Herradura', re.compile('\\\\bHerradura\\\\b', re.IGNORECASE)), (\n 'zTeq Loma Azul', re.compile('\\\\bLoma\\\\s+Azul\\\\b', re.IGNORECASE)), (\n 'zTeq Padre Azul', re.compile('\\\\bPadre\\\\s+Azul\\\\b', re.IGNORECASE)), (\n 'zTeq Partida', re.compile('\\\\bPartida\\\\b', re.IGNORECASE)), ('zTeq Patron'\n , re.compile('\\\\bPatron\\\\b', re.IGNORECASE)), ('zTripleSec Gr Marnier',\n re.compile('\\\\bGrand\\\\s+Marnier\\\\b', re.IGNORECASE)), (\n 'zTripleSec Dekuyper', re.compile('\\\\bDekuyper\\\\b', re.IGNORECASE)), (\n 'zTripleSec Hiram', re.compile('\\\\bHiram\\\\b', re.IGNORECASE)), (\n 'zVodka Absolut', re.compile('\\\\bAbsolut\\\\b', re.IGNORECASE)), (\n 'zVodka Skyy', re.compile('\\\\bSkyy\\\\b', re.IGNORECASE)), ('zVodka Tito',\n re.compile('\\\\bTito', re.IGNORECASE)), ('zWhiskey Balvenie', re.compile\n ('\\\\bBalvenie\\\\b', re.IGNORECASE)), ('zWhiskey J Walker', re.compile(\n '\\\\bJohn+ie\\\\s+Walker\\\\b', re.IGNORECASE))\ngrapeLookup = ('Cab Franc', re.compile(\n '\\\\bCabernet\\\\s+Franc|\\\\bCab\\\\s+Franc', re.IGNORECASE)), ('Cab', re.\n compile('\\\\bCabernet\\\\b|\\\\sCS\\\\s|\\\\sCS$|\\\\bCab\\\\b', re.IGNORECASE)), (\n 'Claret', re.compile('\\\\bClaret\\\\b', re.IGNORECASE)), ('Rose Pinot', re\n .compile('\\\\bRose\\\\b.*\\\\bPinot\\\\b|\\\\bPinot\\\\b.*\\\\bRose\\\\b', re.IGNORECASE)\n ), ('Pinot', re.compile('\\\\bPinot\\\\b|\\\\bPN\\\\b|\\\\bP\\\\s+Noir\\\\b', re.\n IGNORECASE)), ('Merlot', re.compile('\\\\bMerlot\\\\b|\\\\bME\\\\b', re.IGNORECASE)\n ), ('Sauv Blanc', re.compile('\\\\bSauvignon\\\\s+Blanc\\\\b|\\\\bSB\\\\b', re.\n IGNORECASE)), ('Sauv Blanc', re.compile(\n '\\\\bSauvignon\\\\/Fume\\\\s+Blanc\\\\b', re.IGNORECASE)), ('Meritage', re.\n compile('\\\\bMeritage\\\\b', re.IGNORECASE)), ('Fume', re.compile(\n '\\\\bFume\\\\b|\\\\bFumé', re.IGNORECASE)), ('Champagne', re.compile(\n '\\\\bChampagne\\\\b', re.IGNORECASE)), ('Chard', re.compile(\n '\\\\bChar+d|\\\\bCH\\\\b', re.IGNORECASE)), ('Shiraz', re.compile(\n '\\\\bShiraz\\\\b', re.IGNORECASE)), ('Syrah', re.compile(\n '\\\\bSyrah\\\\b|\\\\bSY\\\\b', re.IGNORECASE)), ('Zin', re.compile(\n 
'\\\\bZinfandel\\\\b|\\\\bZIN\\\\b|\\\\bZN\\\\b', re.IGNORECASE)), ('Rose', re.\n compile('\\\\bRose\\\\b|\\\\bRosé', re.IGNORECASE)), ('Sangiovese', re.\n compile('\\\\bSangiovese\\\\b', re.IGNORECASE)), ('Gewurzt', re.compile(\n '\\\\bGew.rztraminer\\\\b|\\\\bGewürzt', re.IGNORECASE)), ('Malbec', re.\n compile('\\\\bMalbec\\\\b', re.IGNORECASE)), ('Viognier', re.compile(\n '\\\\bViognier\\\\b', re.IGNORECASE)), ('Roussanne', re.compile(\n '\\\\bRoussanne\\\\b', re.IGNORECASE)), ('Charbono', re.compile(\n '\\\\bCharbono\\\\b', re.IGNORECASE)), ('PSirah', re.compile(\n '\\\\bPetite Sirah\\\\b', re.IGNORECASE)), ('Cuvee', re.compile(\n '\\\\bCuvee\\\\b', re.IGNORECASE)), ('Red', re.compile(\n '\\\\bRed\\\\b|\\\\bBordeaux\\\\s+Blend\\\\b', re.IGNORECASE)), ('Syrah-Cab', re.\n compile('\\\\bSyrcab\\\\b|\\\\bsyrah[-\\\\s\\\\/]+cab', re.IGNORECASE)), ('Grenache',\n re.compile('\\\\bGrenache\\\\b', re.IGNORECASE)), ('Tempranillo', re.\n compile('\\\\bTempranillo\\\\b', re.IGNORECASE))\nignoreGrapeLookup = {'Cristal': ['Rose', None], 'Domaine Carneros': ['Brut',\n None], 'Dominus': [None], 'Papillon': None, 'Paraduxx': None, 'Veuve':\n None, 'zCointreau': None, 'zGin Hendrick': None, 'zGin Tanqueray': [\n 'Ten', None], 'zTripleSec Gr Marnier': ['1880', '100th', 'Cent', 'Quin',\n None], 'zTripleSec Dekuyper': None, 'zTripleSec Hiram': None,\n 'zVodka Skyy': ['Citrus', None], 'zVodka Tito': None}\nnoGrapeLookup = {'Ehlers': ['120-80'], 'Alban': ['Pandora'], 'BV': [\n 'Tapestry', 'Latour'], 'Bennett Ln': ['Maximus'], 'Bremer': [\n 'Austintatious'], 'Cain Five': None, 'Colgin': ['Cariad', 'IX'],\n 'Concha Don Melchor': None, 'Continuum': None, 'Darioush': ['Duel',\n 'Darius'], 'Duckhorn': ['Discussion'], 'Far Niente': ['Dolce'], 'Flora':\n ['Trilogy'], 'Franciscan': ['Magnificat'], 'Grgich': ['Violetta'],\n 'Gundlach': ['Vintage Reserve'], 'Justin': ['Isosceles'], 'Krug': [\n 'Generations'], 'Mondavi': ['Maestro'], 'Newton': ['Puzzle'],\n 'Opus One': None, 'Phelps': ['Insignia'], 'Prisoner': ['Cuttings',\n 'Derange', 'Saldo', 'Blindfold'], 'Ridge': ['Monte Bello'],\n 'Robert Foley': ['Griffin'], 'Sullivan': ['Coeur de Vigne'], 'Zaca': [\n 'ZThree', 'ZCuvee'], 'zCognac Courvoisier': ['Napolean', 'VS', 'VSOP',\n 'XO'], 'zCognac Hennessy': ['Paradis', 'Richard', 'VS', 'VSOP', 'XO',\n 'Master'], 'zCognac Remy': ['1738', 'Louis XIII', 'VSOP', 'XO', 'VS'],\n 'zRum Ron Zacapa': ['23', 'Negra', 'XO'], 'zRye Hayden': ['Dark',\n 'Caribbean'], 'zScotch Hibiki Harmony': None, 'zTeq Campo Azul': [\n 'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Casamigos': [\n 'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Casino Azul': [\n 'Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Silver'],\n 'zTeq Clase Azul': ['Ultra', 'Extra Anejo', 'Anejo', 'Blanco',\n 'Reposado', 'Mezcal', 'Plata', 'Platino'], 'zTeq Dos Artes': [\n 'Extra Anejo'], 'zTeq Gran Cava': ['Extra Anejo'], 'zTeq Loma Azul': [\n 'Extra Anejo', 'Anejo', 'Blanco', 'Reposado'], 'zTeq Partida': [\n 'Blanco', 'Elegante'], 'zVodka Absolut': ['Citron', 'Mandarin',\n 'Mandrin', 'Mango', 'Ruby', 'Vanilia', 'Raspberri', 'Grapevine', None],\n 'zWhiskey J Walker': ['Double Black', 'Black', 'Blue', 'Gold', 'Green',\n 'Platinum', 'Red', 'Swing', 'White', '18', '21']}\nliquorLookup = {'zRum Mt Gay': [('1703 Mst', re.compile('\\\\b1703\\\\b', re.\n IGNORECASE)), ('BB', re.compile('\\\\bBlack Barrel\\\\b', re.IGNORECASE)),\n ('Eclipse Silver', re.compile('\\\\bEclipse\\\\s+Silver\\\\b', re.IGNORECASE)\n ), ('Eclipse', re.compile('\\\\bEclipse\\\\b', re.IGNORECASE)), 
('Old Peat',\n re.compile('\\\\bOld Peat', re.IGNORECASE)), ('Old Pot', re.compile(\n '\\\\bPot\\\\s+Still\\\\b', re.IGNORECASE)), ('Old', re.compile('\\\\bOld\\\\b',\n re.IGNORECASE)), ('Silver', re.compile('\\\\bSilver\\\\b', re.IGNORECASE)),\n ('XO Peat', re.compile('\\\\bXO\\\\b', re.IGNORECASE))],\n 'zScotch Glenmorangie': [('10', re.compile('\\\\b10(YR)?\\\\b', re.\n IGNORECASE)), ('14 Port', re.compile(\n '14.+\\\\bQuinta\\\\b|14.+\\\\bPort\\\\b|\\\\bQuinta\\\\b.+14|\\\\bPort\\\\b.+14', re.\n IGNORECASE)), ('12 Bacalta', re.compile('\\\\bBacalta\\\\b', re.IGNORECASE)\n ), ('12 Burgundy', re.compile('\\\\bBurgundy\\\\b', re.IGNORECASE)), (\n '12 Nectar', re.compile('\\\\bNectar\\\\b', re.IGNORECASE)), ('12 Port', re\n .compile('\\\\bQuinta\\\\b|\\\\bPort\\\\b', re.IGNORECASE)), ('12 Sherry', re.\n compile('\\\\bLa\\\\s?Santa\\\\b|\\\\bSherry\\\\b', re.IGNORECASE)), ('12 Signet',\n re.compile('\\\\bSignet\\\\b', re.IGNORECASE)), ('15 Cadboll', re.compile(\n '\\\\bCadboll', re.IGNORECASE)), ('15', re.compile('\\\\b15(YR)?\\\\b', re.\n IGNORECASE)), ('18', re.compile('\\\\b18(YR)?\\\\b|\\\\b18YEAR\\\\b', re.\n IGNORECASE)), ('25 Astar', re.compile('\\\\bAstar\\\\b', re.IGNORECASE)), (\n '25', re.compile('\\\\b25(YR)?\\\\b', re.IGNORECASE)), ('Companta', re.\n compile('\\\\bCompanta\\\\b', re.IGNORECASE)), ('Finealta', re.compile(\n '\\\\bFinealta\\\\b', re.IGNORECASE)), ('Milsean', re.compile(\n '\\\\bMilsean\\\\b', re.IGNORECASE)), ('Sonnalta', re.compile(\n '\\\\bSonnalta\\\\b', re.IGNORECASE))], 'zScotch Macallan': [('10 Fine', re\n .compile('\\\\bFine.*\\\\b10\\\\b|\\\\b10.*Fine')), ('10', re.compile(\n '\\\\b10\\\\b')), ('12 Double Gold', re.compile(\n '\\\\bDbl\\\\b.*Gold|\\\\bDouble\\\\b.*Gold', re.IGNORECASE)), ('12 Double', re\n .compile('\\\\bDouble\\\\s.*12(YR)?\\\\b', re.IGNORECASE)), ('12 Double', re.\n compile('\\\\b12\\\\s.*Double\\\\b', re.IGNORECASE)), ('12 Double', re.\n compile('\\\\bDbl\\\\b|\\\\bDouble\\\\b', re.IGNORECASE)), ('12 Edition 1', re.\n compile('\\\\bEdition\\\\s.*1\\\\b', re.IGNORECASE)), ('12 Edition 2', re.\n compile('\\\\bEdition\\\\s.*2\\\\b', re.IGNORECASE)), ('12 Edition 3', re.\n compile('\\\\bEdition\\\\s.*3\\\\b', re.IGNORECASE)), ('12 Edition 4', re.\n compile('\\\\bEdition\\\\s.*4\\\\b', re.IGNORECASE)), ('12 Sherry', re.\n compile('\\\\b12\\\\s.*Sherry\\\\b|\\\\bSherry\\\\b\\\\s.*\\\\b12', re.IGNORECASE)),\n ('12 Triple', re.compile('\\\\b12(YR)?\\\\s.*Triple\\\\b', re.IGNORECASE)), (\n '12 Triple', re.compile('\\\\bTriple\\\\s.*12\\\\b', re.IGNORECASE)), ('12',\n re.compile('\\\\b12(YR)?\\\\b', re.IGNORECASE)), ('15 Triple', re.compile(\n '\\\\b15(YR)?\\\\s.*Triple\\\\b|Triple.+\\\\b15(YR)?\\\\b', re.IGNORECASE)), (\n '15 Fine', re.compile('\\\\b15(YR)?\\\\b.*\\\\bFine\\\\b', re.IGNORECASE)), (\n '15', re.compile('\\\\b15(YR)?\\\\b', re.IGNORECASE)), ('17 Sherry', re.\n compile('\\\\b17(YR)?\\\\s.*Sherry\\\\b', re.IGNORECASE)), ('17 Fine', re.\n compile('\\\\b17(YR)?\\\\b.*\\\\bFine\\\\b', re.IGNORECASE)), ('17', re.compile\n ('\\\\b17(YR)?\\\\b', re.IGNORECASE)), ('18 Sherry', re.compile(\n '\\\\b18(YR)?\\\\s.*Sherry\\\\b|Sherry\\\\b.*18', re.IGNORECASE)), ('18 Triple',\n re.compile('\\\\b18(YR)?\\\\s.*Triple\\\\b|Triple.+\\\\b18(YR)?\\\\b', re.\n IGNORECASE)), ('18 Fine', re.compile('\\\\b18(YR)?\\\\b.*\\\\bFine\\\\b', re.\n IGNORECASE)), ('18 Gran', re.compile('Gran\\\\b.*\\\\b18', re.IGNORECASE)),\n ('18', re.compile('\\\\b18(YR)?\\\\b', re.IGNORECASE)), ('21 Fine', re.\n compile('\\\\b21.*Fine\\\\b', re.IGNORECASE)), ('21', 
re.compile(\n '\\\\b21(YR)?\\\\b', re.IGNORECASE)), ('25 Sherry', re.compile(\n '\\\\b25\\\\s.*Sherry\\\\b', re.IGNORECASE)), ('25', re.compile(\n '\\\\b25(YR)?\\\\b')), ('30 Sherry', re.compile('\\\\b30\\\\s.*Sherry', re.\n IGNORECASE)), ('30 Triple', re.compile(\n '\\\\b30(YR)?\\\\s.*Triple\\\\b|Triple.+\\\\b30(YR)?\\\\b', re.IGNORECASE)), (\n '30 Fine', re.compile('\\\\b30(YR)?\\\\b.*\\\\bFine\\\\b|Fine.*30', re.\n IGNORECASE)), ('30', re.compile('\\\\b30(YR)?\\\\b')), ('Rare', re.compile(\n '\\\\bRare\\\\b', re.IGNORECASE))], 'zTeq Cuervo': [('Especial Gold', re.\n compile('\\\\bEspecial\\\\b.*Gold\\\\b|Gold.*Especial', re.IGNORECASE)), (\n 'Especial Blue', re.compile('\\\\bEspecial\\\\b.*Blue\\\\b', re.IGNORECASE)),\n ('Especial', re.compile('\\\\bEspecial\\\\b', re.IGNORECASE)), (\n 'Familia Platino', re.compile('\\\\bPlatino\\\\b', re.IGNORECASE)), (\n 'Familia Anejo', re.compile('\\\\bFamilia\\\\b|\\\\bReserva\\\\b', re.\n IGNORECASE)), ('Gold', re.compile('\\\\bGold\\\\b', re.IGNORECASE)), (\n 'Reposado Lagavulin', re.compile('\\\\bReposado.*Lagavulin', re.\n IGNORECASE)), ('Tradicional Anejo', re.compile(\n 'Tradicional.*Anejo|Anejo.*Tradicional', re.IGNORECASE)), (\n 'Tradicional Reposado', re.compile(\n 'Tradicional.*Reposado|Reposado.*Tradicional', re.IGNORECASE)), (\n 'Tradicional Silver', re.compile('\\\\bTradicional\\\\b', re.IGNORECASE)),\n ('Tradicional Silver', re.compile('\\\\bTraditional\\\\b', re.IGNORECASE)),\n ('Reposado', re.compile('\\\\bReposado\\\\b', re.IGNORECASE)), ('Silver',\n re.compile('\\\\bSilver\\\\b', re.IGNORECASE))], 'zTeq Don Julio': [('1942',\n re.compile('\\\\b1942\\\\b', re.IGNORECASE)), ('Real', re.compile(\n '\\\\bReal\\\\b', re.IGNORECASE)), ('Anejo Claro 70th', re.compile(\n '\\\\b70th\\\\b', re.IGNORECASE)), ('Anejo Claro', re.compile(\n '\\\\bAnejo\\\\b\\\\s*Claro\\\\b', re.IGNORECASE)), ('Anejo', re.compile(\n '\\\\bAnejo\\\\b', re.IGNORECASE)), ('Blanco', re.compile('\\\\bBlanco\\\\b',\n re.IGNORECASE)), ('Reposado Lagavulin', re.compile(\n '\\\\bRepo.+Lagavulin\\\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(\n '\\\\bReposado.+Double\\\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(\n '\\\\bReposado.+Dbl\\\\b', re.IGNORECASE)), ('Reposado Dbl', re.compile(\n '\\\\bDouble.+Reposado\\\\b', re.IGNORECASE)), ('Reposado Private', re.\n compile('\\\\bReposado.+Private\\\\b', re.IGNORECASE)), ('Reposado', re.\n compile('\\\\bReposado\\\\b', re.IGNORECASE)), ('Silver', re.compile(\n '\\\\bSilver\\\\b', re.IGNORECASE))], 'zTeq Herradura': [('Ultra', re.\n compile('\\\\bUltra\\\\b', re.IGNORECASE)), ('Suprema', re.compile(\n '\\\\bSuprema\\\\b', re.IGNORECASE)), ('Anejo', re.compile('\\\\bAnejo\\\\b',\n re.IGNORECASE)), ('Blanco', re.compile('\\\\bBlanco\\\\b', re.IGNORECASE)),\n ('Reposado Gold', re.compile(\n '\\\\bReposado\\\\s+Gold\\\\b|\\\\bGold\\\\s+Reposado\\\\b', re.IGNORECASE)), (\n 'Reposado Scotch', re.compile(\n '\\\\bReposado.+Scotch\\\\b|\\\\bScotch.+Reposado\\\\b', re.IGNORECASE)), (\n 'Reposado Port', re.compile('\\\\bPort.+Reposado\\\\b|\\\\bReposado.+Port\\\\b',\n re.IGNORECASE)), ('Reposado', re.compile('\\\\bReposado\\\\b', re.\n IGNORECASE)), ('Silver', re.compile('\\\\bSilver\\\\b', re.IGNORECASE))],\n 'zTeq Patron': [('Gran Piedra', re.compile('\\\\bPiedra\\\\b', re.\n IGNORECASE)), ('DELETE Roca DELETE', re.compile('\\\\bRoca\\\\b', re.\n IGNORECASE)), ('Anejo Extra Lalique', re.compile('\\\\bLalique\\\\b', re.\n IGNORECASE)), ('Anejo Extra 7yr', re.compile(\n '\\\\b7YR\\\\b|\\\\b7 anos\\\\b|\\\\b7 year\\\\b', 
re.IGNORECASE)), (\n 'Anejo Extra 5yr', re.compile('\\\\b5YR\\\\b|\\\\b5 anos\\\\b|\\\\b5 year\\\\b', re\n .IGNORECASE)), ('Anejo Extra 10yr', re.compile(\n '\\\\b10\\\\b.+\\\\bExtra\\\\b|\\\\bExtra\\\\b.+10', re.IGNORECASE)), (\n 'Anejo Extra', re.compile('\\\\bExtra\\\\s+Anejo\\\\b', re.IGNORECASE)), (\n 'Gran Anejo', re.compile('\\\\bGran\\\\s+Anejo\\\\b', re.IGNORECASE)), (\n 'Gran Anejo', re.compile('\\\\bBurdeos\\\\b', re.IGNORECASE)), (\n 'Gran Smoky', re.compile('\\\\bGran\\\\s+.*Smoky\\\\b', re.IGNORECASE)), (\n 'Anejo', re.compile('\\\\bAnejo\\\\b', re.IGNORECASE)), ('Gran Platinum',\n re.compile('\\\\bPlatinum\\\\b', re.IGNORECASE)), ('Reposado', re.compile(\n '\\\\bReposado\\\\b', re.IGNORECASE)), ('Silver LTD', re.compile(\n '\\\\bSilver.*Limited\\\\b|\\\\bLimited.*Silver\\\\b', re.IGNORECASE)), (\n 'Silver Estate', re.compile('\\\\bEstate.*Silver\\\\b|\\\\bSilver.*Estate\\\\b',\n re.IGNORECASE)), ('Silver', re.compile('\\\\bSilver\\\\b', re.IGNORECASE)),\n ('Blanco', re.compile('\\\\bBlanco\\\\b', re.IGNORECASE))],\n 'zTeq Padre Azul': [('Blanco', re.compile('\\\\bsilver\\\\b', re.IGNORECASE\n ))], 'zWhiskey Balvenie': [('12 Double', re.compile(\n '\\\\bDouble.*12(YR)?\\\\b', re.IGNORECASE)), ('12 Double', re.compile(\n '\\\\b12(YR)?\\\\s.*Double', re.IGNORECASE)), ('12 First', re.compile(\n '\\\\b12(YR)?\\\\s.*First', re.IGNORECASE)), ('12 USA', re.compile(\n '\\\\b12.*American|American.*12', re.IGNORECASE)), ('12 Toast', re.\n compile('\\\\b12(YR)?\\\\s.*Toast', re.IGNORECASE)), ('12', re.compile(\n '\\\\b12(YR)?\\\\b', re.IGNORECASE)), ('14 Carib', re.compile(\n '\\\\b14(YR)?\\\\s.*Carib', re.IGNORECASE)), ('14 Carib', re.compile(\n '\\\\b14(YR)?\\\\s.*CB\\\\s+Cask', re.IGNORECASE)), ('14 Carib', re.compile(\n '\\\\bCarr?ib', re.IGNORECASE)), ('14 Peat', re.compile(\n '\\\\b14(YR)?\\\\s.*Peat', re.IGNORECASE)), ('15 Sherry', re.compile(\n '\\\\b15(YR)?\\\\s.*Sherry\\\\b', re.IGNORECASE)), ('15 Sherry', re.compile(\n '\\\\bSherry\\\\s+.*15(YR)?\\\\b', re.IGNORECASE)), ('15', re.compile(\n '\\\\b15(YR)?\\\\b', re.IGNORECASE)), ('16 Triple', re.compile(\n '\\\\b16(YR)?\\\\s.*Triple\\\\b', re.IGNORECASE)), ('17 Sherry Double', re.\n compile('\\\\b17(YR)?\\\\s.*Sherry\\\\s+Doub', re.IGNORECASE)), ('17 Sherry',\n re.compile('\\\\b17(YR)?\\\\s.*Sherry', re.IGNORECASE)), ('17 Double', re.\n compile('\\\\b17(YR)?\\\\s.*Double', re.IGNORECASE)), ('17 Double', re.\n compile('\\\\bDouble.*17(YR)?\\\\b', re.IGNORECASE)), ('17 Peat', re.\n compile('\\\\b17(YR)?\\\\s.*Peat', re.IGNORECASE)), ('17 Peat', re.compile(\n '\\\\bPeat.*17(YR)?\\\\b', re.IGNORECASE)), ('17', re.compile(\n '\\\\b17(YR)?\\\\b', re.IGNORECASE)), ('21 Port', re.compile('\\\\b21.*Port',\n re.IGNORECASE)), ('21 Port', re.compile('\\\\bPort.*21\\\\b', re.IGNORECASE\n )), ('21', re.compile('21', re.IGNORECASE)), ('25', re.compile(\n '\\\\b25(YR)?\\\\b', re.IGNORECASE)), ('30', re.compile('\\\\b30(YR)?\\\\b', re\n .IGNORECASE)), ('40', re.compile('\\\\b40(YR)?\\\\b', re.IGNORECASE))],\n 'zBourbon Woodford Res': [('Dbl', re.compile('\\\\bDouble\\\\b', re.\n IGNORECASE)), ('Derby', re.compile('\\\\bDerby\\\\b', re.IGNORECASE)), (\n 'Rye Choc', re.compile('\\\\bChocolate.*Rye\\\\b', re.IGNORECASE)), ('Rye',\n re.compile('\\\\bRye\\\\b', re.IGNORECASE)), ('Brandy', re.compile(\n '\\\\bBrandy\\\\b', re.IGNORECASE)), ('Batch', re.compile('\\\\bBatch\\\\b', re\n .IGNORECASE)), ('Barrel', re.compile('\\\\bBarrel\\\\b', re.IGNORECASE)), (\n 'Master', re.compile('\\\\bMasters?\\\\b', re.IGNORECASE)), ('Malt', re.\n 
compile('\\\\bMalt\\\\b', re.IGNORECASE)), ('Maple', re.compile(\n '\\\\bMaple\\\\b', re.IGNORECASE)), ('Wheat', re.compile('\\\\bWheat\\\\b', re.\n IGNORECASE)), ('', re.compile('\\\\bWoodford\\\\b', re.IGNORECASE))],\n 'zSambuca': [('Romana Black', re.compile(\n '\\\\bRomana.*\\\\bBlack\\\\b|\\\\bBlack\\\\s+Romana\\\\b', re.IGNORECASE)), (\n 'Romana', re.compile('\\\\bRomana\\\\b', re.IGNORECASE)), ('Di Amore', re.\n compile('\\\\bdi Amore\\\\b', re.IGNORECASE))], 'zScotch Hibiki': [('12',\n re.compile('\\\\b12\\\\s*YE?A?R\\\\b', re.IGNORECASE)), ('17 Limited', re.\n compile('\\\\b17\\\\s*YE?A?R\\\\b.+Limited', re.IGNORECASE)), ('17', re.\n compile('\\\\b17\\\\s*YE?A?R\\\\b', re.IGNORECASE)), ('21 Limited', re.\n compile('\\\\b21\\\\s*YE?A?R\\\\b.+Limited', re.IGNORECASE)), ('21', re.\n compile('\\\\b21\\\\s*YE?A?R\\\\b', re.IGNORECASE)), ('30', re.compile(\n '\\\\b30\\\\s*YE?A?R\\\\b', re.IGNORECASE))]}\nwineAbbrLookup = {'120-80': '\\\\bOne\\\\s+Twenty\\\\s+Over\\\\s+Eighty\\\\b',\n '3Amigos': '\\\\bThree\\\\s+Amigos\\\\b', '3Palms': '\\\\bThree\\\\s+Palms\\\\b',\n '3Sister': '\\\\bThree\\\\s+Sisters?\\\\b', '4Barrell':\n '\\\\b4[\\\\-\\\\s]Barrels?\\\\b', 'Alex': '\\\\bAlexander\\\\b', 'And':\n '\\\\bAnderson\\\\b', 'Car': '\\\\bCarneros\\\\b', 'Carries': '\\\\bCarrie', 'CC':\n '\\\\bC\\\\.?C\\\\.?\\\\s+Ranch\\\\b', 'Clone4': '\\\\bClone\\\\s+4\\\\b', 'Clone6':\n '\\\\bClone\\\\s+6\\\\b', 'Crossbarn': '\\\\bCross\\\\s+Barn\\\\b', 'Donna':\n '\\\\bDonna', 'Est': '\\\\bEstate\\\\b', 'Estate': '\\\\bEst\\\\b', 'Gap':\n '\\\\bGap|\\\\s%27Gap', 'Gary': '\\\\bGary', 'Julia': '\\\\bJulia', 'Knights':\n '\\\\bKnight', 'KistlerVnyd': '\\\\bKistler (Vineyard|VYD|EST)\\\\b', 'LP':\n '\\\\bLes Pierres\\\\b', 'Lyn': '\\\\bLyndenhur?st\\\\b', 'Mont':\n '\\\\bMonterey\\\\b', 'Mt': '\\\\bMount\\\\b|\\\\bMt\\\\.\\\\b', 'Napa/Son':\n '\\\\bNapa.*Son', 'Oak': '\\\\bOakville\\\\b', 'One-Pt-5':\n '\\\\bOne\\\\s+Point\\\\s+Five\\\\b', 'Pomm': '\\\\bPommeraie\\\\b', 'Priv':\n '\\\\bPrivate\\\\b', 'RR': '\\\\bRussian\\\\s+Rivers?\\\\b|RRV', 'RRR':\n '\\\\bRussian\\\\s+Rivers?\\\\b|RRV', 'Res':\n '\\\\bReserve\\\\b|\\\\bRsv\\\\b|\\\\bResrv\\\\b|\\\\bReserv\\\\b|\\\\bReserve$', 'Rose':\n '\\\\bRosé|\\\\bROS&EACUTE;|\\\\bRos%E9', 'Ruth': '\\\\bRutherford\\\\b',\n 'Sandy': '\\\\bSandy', 'Samanthas': '\\\\bSamantha', 'SC':\n '\\\\bSanta\\\\s+Cruz\\\\b', 'SLD': '\\\\bStag.*Leap\\\\b', 'SLH':\n '\\\\bSanta\\\\s+Lucia\\\\b', 'SMV': '\\\\bSanta\\\\s+Maria|\\\\bS\\\\s+Maria', 'SRH':\n '\\\\bSTA\\\\.?|\\\\bSANTA\\\\s+Rita\\\\b|\\\\bSTA\\\\sRITA\\\\sHILLS|\\\\bS\\\\s+RITA\\\\b',\n 'SS': '\\\\bSpecial\\\\s+Selection\\\\b', 'Stage': '\\\\bStagecoach\\\\b',\n 'Son': '\\\\bSonoma\\\\b', 'SYV': '\\\\bSanta\\\\s+Ynez\\\\s+Valley\\\\b', 'TD9':\n '\\\\bTD\\\\s+9\\\\b|\\\\bTD-9\\\\b', 'Terraces': '\\\\bTerrace', 'TheCutrer':\n '\\\\bThe Cutrer\\\\b|nnay Cutrer\\\\b', 'Tok':\n '\\\\bTo[\\\\s\\\\-]?Kolan|\\\\bTo[\\\\s\\\\-]?Kalon', 'Turn4': '\\\\bTurn\\\\s+4\\\\b',\n 'Vernas': '\\\\bVerna', 'Vine': '\\\\bVines\\\\b', 'Yount':\n '\\\\bYountville\\\\b', 'ZThree': '\\\\bZ.*\\\\bThree\\\\b', 'ZCuvee':\n '\\\\bZ.*\\\\bCuvee\\\\b|\\\\bCuvee Z\\\\b', 'Agustina': '\\\\bAugustina\\\\b',\n 'Durell': '\\\\bDurrell\\\\b', 'Benchland': '\\\\bBenchlands\\\\b', 'Pritchard':\n '\\\\bPitchard\\\\b'}\nreShipsAs = re.compile('\\\\(ships?\\\\s', re.IGNORECASE)\ndefaultorderlist = [['Tok'], ['Oak'], ['Res'], ['RR'], ['Landslide'], [\n 'Yount'], ['RRR'], ['Son'], ['Ruth'], ['Napa'], ['Helena'], ['SRH'], [\n 'SLH'], ['SMV'], ['SLD'], ['Paso'], 
['Alex'], ['Single'], ['Estate']]\n\n\ndef globalVariableCheck(debug=False):\n for liquor in liquorLookup:\n if liquor in noGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:'\n , liquor)\n if liquor in ignoreGrapeLookup:\n print(\n 'WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:'\n , liquor)\n for winery in ignoreGrapeLookup:\n if winery in noGrapeLookup:\n print(\n 'WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:'\n , winery)\n\n\ndef setOptionDictMasterFldValues(optiondict, debug=False):\n for fld in ('fldWine', 'fldWineDescr'):\n if not optiondict[fld + 'Master']:\n optiondict[fld + 'Master'] = optiondict[fld]\n\n\ndef wineLookupByName(nameLookup, lookupStr, other, msg, wineAbbrLookup=None,\n debug=False):\n funcname = 'wineLookupByName:' + msg + ':'\n if debug:\n print(funcname + 'nameLookup:', nameLookup)\n if nameLookup is None:\n if debug:\n print(funcname + 'match: value is none - continue on')\n return ''\n for name in nameLookup:\n if debug:\n print(funcname + 'match-name:', name)\n if name is None:\n if debug:\n print(funcname +\n 'name-matched: value is none - continue on:pass back blank'\n )\n return ''\n reName = re.compile('\\\\b' + name + '\\\\b', re.IGNORECASE)\n if reName.search(lookupStr):\n if debug:\n print(funcname + 'name-MATCHED:', name)\n for val in other:\n if reName.search(val):\n other.remove(val)\n if debug:\n print(funcname + 'name-remove-from-other:', val)\n return name\n if wineAbbrLookup and name in wineAbbrLookup:\n reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)\n if debug:\n print(funcname + 'Abbr-match-name:', name)\n if reName.search(lookupStr):\n if debug:\n print(funcname + 'Abbr-name-MATCHED:', wineAbbrLookup[name]\n )\n for val in other:\n if reName.search(val):\n other.remove(val)\n if debug:\n print(funcname + 'name-remove-from-other:', val)\n return name\n if debug:\n print(funcname + 'name match not found:set to blank')\n return None\n\n\ndef findQualifier(wine, debug=False):\n for val, reSearch in reQualLookup:\n if reSearch.search(wine):\n if debug:\n print('findQualifier:matched-returning:', val)\n return val\n if debug:\n print('findQualifier:no-match-returning:', None)\n return None\n\n\ndef findWinery(rec, lastWinery, lastReWinery, fldWine, debug=False):\n if lastWinery:\n if debug:\n try:\n print('fw:new winery:', rec[fldWine])\n except Exception as e:\n print('debug error8-continuing:', str(e))\n print('rec[fldWine]:type:', type(rec[fldWine]))\n print('fw:checking if this is lastWinery:', lastWinery)\n if lastReWinery.search(rec[fldWine]):\n if debug:\n print('fw:this matches the last winery')\n return lastWinery, lastReWinery\n elif debug:\n print('fw:not last winery')\n for winery, reWinery in wineryLookup:\n if debug:\n print('fw:not lastWinery-checking winery:', winery)\n if fldWine not in rec:\n print('not a column in this record fldWine:', fldWine)\n print('rec:', rec)\n if reWinery.search(rec[fldWine]):\n if debug:\n print('fw:winery match found:', winery)\n return winery, reWinery\n return None, None\n\n\ndef findLiquor(rec, winery, fldWine, debug=False):\n for liquor, reLiquor in liquorLookup[winery]:\n if debug:\n print('fl:checking liquor:', liquor)\n if reLiquor.search(rec[fldWine]):\n if debug:\n print('fl:liquor match found:', liquor)\n return liquor, reLiquor\n return None, None\n\n\ndef findGrapeByRegex(rec, fldWine, debug=False):\n for grape, reGrape in grapeLookup:\n if debug:\n 
print('fgbr:grape:', grape)\n if grape is not None and reGrape.search(rec[fldWine]):\n if debug:\n print('fgbr:grape match found:', grape)\n return grape, reGrape\n return None, None\n\n\ndef findStrInRecReturnOther(rec, fldWineDescr, findStr, debug=False):\n matchLoc = rec[fldWineDescr].find(findStr)\n if matchLoc > -1:\n other = rec[fldWineDescr][matchLoc + len(findStr) + 1:].split()\n if debug:\n print('fsirro:findStr matched:', findStr)\n if debug:\n print('fsirro:findStr other:', other)\n return findStr, other\n if debug:\n print('fsirro:findStr did not match using:', findStr)\n return None, []\n\n\ndef findGrapeByStr(rec, fldWineDescr, debug=False):\n for grape, reGrape in grapeLookup:\n if debug:\n print('fg:grape:', grape)\n grape, other = findStrInRecReturnOther(rec, fldWineDescr, grape,\n debug=debug)\n if grape:\n return grape, other\n return None, []\n\n\ndef findVintage(rec, fldWine, debug=False):\n for reVintage in vintageLookup:\n m = reVintage.search(rec[fldWine])\n if m:\n if m.group(1):\n vintage = m.group(1)\n if debug:\n print('fv:vintage-match:', reVintage, ':group1')\n elif m.group(2):\n vintage = m.group(2)\n if debug:\n print('fv:vintage-match:', reVintage, ':group2')\n elif m.group(3):\n vintage = m.group(3)\n if debug:\n print('fv:vintage-match:', reVintage, ':group3')\n else:\n vintage = m.group(4)\n if debug:\n print('fv:vintage-match:', reVintage, ':group4')\n return vintage\n return None\n\n\ndef buildWineryGrapeLookup(wines, fldWineDescr='winedescr', fldWine='wine',\n debug=False):\n wgLookup = {}\n lastWinery = None\n lastReWinery = None\n for rec in wines:\n if debug:\n print('bwgl:new rec:', rec[fldWineDescr])\n if not fldWineDescr in rec:\n print('creating-field:', fldWineDescr)\n rec[fldWineDescr] = ''\n winery = grape = wine = liquor = None\n other = []\n lastWinery, lastReWinery = winery, reWinery = findWinery(rec,\n lastWinery, lastReWinery, fldWine, debug=debug)\n if not winery:\n if debug:\n print('bwgl:did not find winery-skipping:', rec[fldWine])\n continue\n if winery in ignoreGrapeLookup:\n wine = ''\n if debug:\n print('bwgl:wine check ignoreGrapeLookup on winery:', winery)\n elif winery in noGrapeLookup:\n if debug:\n print('bwgl:wine check noGrapeLookup on winery:', winery)\n wine = wineLookupByName(noGrapeLookup[winery], rec[fldWineDescr\n ], [], 'noGrapeLookup', debug=debug)\n if False and wine == '':\n if debug:\n print('bwgl:nograpelookup:no-match:set wine to None')\n wine = None\n elif winery in liquorLookup:\n if debug:\n print('bwgl:liquor check on winery:', winery)\n liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)\n if liquor is not None:\n wine = liquor\n if debug:\n print('bwgl:liquor found and put in wine:', wine)\n if wine is None:\n if debug:\n print('bwgl:grape check because wine is None')\n grape, other = findGrapeByStr(rec, fldWineDescr)\n if debug:\n print('bwgl:grape:', grape, ':other:', other)\n elif debug:\n print('bwgl:grape check skipped - we have a wine')\n if wine is None and grape is None:\n if debug:\n print('bwgl:record skipped - no grape or wine defined')\n continue\n if grape is None:\n if debug:\n print('bwgl:build other from winery')\n wineryFind, other = findStrInRecReturnOther(rec, fldWineDescr,\n winery, debug=debug)\n if 'case' in other:\n other.remove('case')\n if debug:\n print('bwgl:remove case from other')\n if other:\n if debug:\n print('bwgl:looking at other for quals, bottlesize and vintage'\n )\n if not other[-1].isdigit():\n for qual, reQual in reQualLookup:\n if qual == 
other[-1]:\n if debug:\n print('bwgl:remove qualifier from other:', qual)\n del other[-1]\n break\n if other and not other[-1].isdigit():\n for size, reSize in sizeLookup:\n if size == other[-1]:\n if debug:\n print('bwgl:remove bottlesize from other:', size)\n del other[-1]\n break\n if other and other[-1].isdigit():\n if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery\n ] and other[-1] in ignoreGrapeLookup[winery]:\n if debug:\n print(\n 'bwgl:value is in ignoreLookupGrape - keeping it:',\n other[-1])\n else:\n if debug:\n print('bwgl:remove vintage from other:', other[-1])\n del other[-1]\n if wine and wine in other:\n other.remove(wine)\n if debug:\n print('bwgl:remove wine from other:', wine)\n if debug:\n try:\n print('bwgl:Final-Build:', winery, ':', grape, ':', wine,\n ':', liquor, ':', other, ':', rec[fldWineDescr], ':',\n rec[fldWine])\n except Exception as e:\n print('debug error2-continuing:', str(e))\n print('fldWine:', fldWine)\n if grape is None and wine is not None:\n grape = wine\n if debug:\n print('bwgl:set-grape-to-wine:', grape)\n if debug:\n print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)\n if winery not in wgLookup:\n wgLookup[winery] = {grape: []}\n elif grape not in wgLookup[winery]:\n wgLookup[winery][grape] = []\n if other and other not in wgLookup[winery][grape]:\n wgLookup[winery][grape].append(other)\n if debug:\n print('bwgl:appending to wgLookup:other:', other)\n if debug:\n print('bwgl:complete-read-of-master-file:sort wgLookup')\n for winery in wgLookup:\n for grape in wgLookup[winery]:\n wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=\n len, reverse=True)\n if debug:\n print('\\n' * 5)\n print('START WGLOOKUP DUMPED')\n print('#' * 80)\n if ppFlag:\n pp.pprint(wgLookup)\n else:\n print('bwgl:final-wgLookup:\\n', wgLookup)\n print('#' * 80)\n return wgLookup\n\n\ndef findAddAttribWgLookup(rec, winery, value, fldWine, AbbrLookup=[],\n defaultorderlist=None, valueDescr='', debug=False):\n singlematch = []\n if debug:\n try:\n print('faawl:value:', valueDescr, ':match-wgLookup:', rec[\n fldWine], ':', wgLookup[winery][value])\n except Exception as e:\n print('debug error7-continuing:', str(e))\n print('fldWine:', fldWine)\n for valuematchset in wgLookup[winery][value]:\n if debug:\n print('faawl:testing valuematchset:', valuematchset, ':length:',\n len(valuematchset))\n allmatch = True\n for valuematch in valuematchset:\n reMatch1 = re.compile('\\\\b' + valuematch + '\\\\b', re.IGNORECASE)\n reMatch2 = re.compile('\\\\s' + valuematch + '\\\\s', re.IGNORECASE)\n m1 = reMatch1.search(rec[fldWine])\n m2 = reMatch2.search(rec[fldWine])\n if m1 or m2:\n allmatch = True and allmatch\n elif valuematch in AbbrLookup:\n if debug:\n print('faawl:valuematch-abbr:', valuematch, ':',\n wineAbbrLookup[valuematch])\n reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)\n allmatch = reMatch.search(rec[fldWine]) and allmatch\n else:\n allmatch = False and allmatch\n if debug:\n print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)\n if allmatch:\n if debug:\n print('faawl:value matched:', valuematchset)\n if len(valuematchset) == 1:\n if debug:\n print('faawl:single-valuematch-set-added-to-singlematch:',\n valuematchset)\n singlematch.append(valuematchset)\n else:\n if debug:\n print('faawl:multivalue-valuematch-set-found:done')\n return valuematchset\n if not singlematch:\n if debug:\n print('faawl:exit with singlematch NOT populated return blank')\n return []\n if debug:\n print('faawl:exit with singlematch 
populated:', singlematch)\n if len(singlematch) == 1 or not defaultorderlist:\n if debug:\n print('faawl:return first entry in singlematch:', singlematch[0])\n return singlematch[0]\n defaultorder = defaultorderlist[:]\n if debug:\n print('faawl:multiple single match value-singlematch:', singlematch)\n for val in singlematch[::-1]:\n if val not in defaultorder:\n defaultorder.insert(0, val)\n if winery == 'Mondavi' and ['Tok'] in singlematch:\n if debug:\n print('faawl:Change from:', valuematchset, ':to Tok for mondavi')\n return ['Tok']\n for val in defaultorder:\n if val in singlematch:\n if debug:\n print('faawl:selected-singlematch-value:', val)\n return val\n if debug:\n print('faawl:valuematchset-empty')\n return []\n\n\ndef setWineryDescrFromWineryGrapeLookup(wgLookup, wines, fldWineDescr=\n 'winedescr', fldWine='wine', fldWineDescrNew='winedescrnew',\n fldWineDescrMatch=False, debug=False):\n if debug:\n print('\\n' * 10,\n 'START WINEDESCR SETTING HERE ---------------------------------------------'\n )\n for rec in wines:\n (winery) = (grape) = (wine) = (vintage) = (case) = (size) = (liquor\n ) = (nongrape) = (qual) = None\n winematchset = grapematchset = []\n if debug:\n try:\n print('setWinery:fldWine:', rec[fldWine])\n except Exception as e:\n print('debug error2-continuing:', str(e))\n print('fldWine:', fldWine)\n if fldWineDescrNew not in rec:\n rec[fldWineDescrNew] = rec[fldWineDescr]\n winery, reWinery = findWinery(rec, None, None, fldWine, debug=debug)\n if winery is None:\n if debug:\n print('setWinery:winery not found-next record:' + rec[fldWine])\n continue\n elif winery not in wgLookup:\n if debug:\n print('setWinery:winery not in wgLookup:', winery)\n continue\n grape, reGrape = findGrapeByRegex(rec, fldWine, debug=debug)\n if debug:\n print('setWinery:grape found:', grape)\n if winery in ignoreGrapeLookup:\n if debug:\n print(\n 'setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:'\n , winery)\n wine = ''\n grape = None\n nongrape = True\n if winery in noGrapeLookup:\n if debug:\n print('setWinery:noGrapeLookup wine check:', winery)\n wine = wineLookupByName(noGrapeLookup[winery], rec[fldWine], [],\n 'noGrapeLookup', wineAbbrLookup, debug=debug)\n if debug:\n print('setWinery:nogrape check:wine:', wine)\n if wine == '':\n if debug:\n print(\n 'setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True'\n )\n grape = None\n wine = ''\n nongrape = True\n elif wine:\n grape = None\n if debug:\n print(\n 'setWinery:nograpeLookup:wine found - clear grape field'\n )\n if wine is None and winery in liquorLookup:\n if debug:\n print('setWinery:liqourLookup:', winery)\n liquor, reLiquor = findLiquor(rec, winery, fldWine, debug=debug)\n if liquor is not None:\n wine = liquor\n if debug:\n print('setWinery:liquorLookup-match:', liquor)\n if not grape and not nongrape and not wine and liquor is None:\n if debug:\n print('setWinery:did not find grape-skipping record:', rec[\n fldWineDescr])\n continue\n if debug:\n print('setWinery:pre-vintage found values for wine/liquor:',\n wine, ':grape:', grape)\n vintage = findVintage(rec, fldWine, debug=debug)\n if debug:\n print('setWinery:vintage:', vintage)\n if reCase.search(rec[fldWine]):\n case = 'case'\n for size, reSize in sizeLookup:\n if debug:\n print('setWinery:sizeLookup:', size)\n if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[\n fldWine]):\n if debug:\n print('setWinery:sizeLookup:matched:', reSize)\n break\n else:\n size = None\n if debug:\n 
print('setWinery:sizeLookup:None-found')\n qual = findQualifier(rec[fldWine], debug=debug)\n if debug:\n try:\n print('setWinery:FinalAttributes:', winery, ':', grape, ':',\n wine, ':', liquor, ':', vintage, ':', case, ':', size,\n ':', qual, ':', rec[fldWine])\n except Exception as e:\n print('debug error5-continuing:', str(e))\n print('fldWine:', fldWine)\n if liquor is not None:\n if debug:\n print(\n 'setWinery:liquor flag set - no additional data needs to be collected'\n )\n elif wine is not None:\n if debug:\n print(\n 'setWinery:wine is not None - do additional lookups:wine:',\n wine)\n if wine in wgLookup[winery] and wgLookup[winery][wine]:\n if debug:\n print('setWinery:lookup winematchset')\n winematchset = findAddAttribWgLookup(rec, winery, wine,\n fldWine, wineAbbrLookup, None, valueDescr='wine', debug\n =debug)\n else:\n print('setWinery:unable to perform wgLookup on winery:',\n winery, ':wine:', wine, ':rec-wine:', rec[fldWine])\n if debug:\n try:\n print('wgLookup[winery]:', wgLookup[winery])\n except Exception as e:\n print('debug error3-continuing:', str(e))\n print('winery:', winery)\n if debug:\n print('setWinery:winematchset:', winematchset)\n elif grape is not None:\n if debug:\n print('setWinery:grape is not None - do additional lookups:',\n grape)\n if grape in wgLookup[winery] and wgLookup[winery][grape]:\n grapematchset = findAddAttribWgLookup(rec, winery, grape,\n fldWine, wineAbbrLookup, defaultorderlist, valueDescr=\n 'grape', debug=debug)\n elif grape in wgLookup[winery]:\n if debug:\n print(\n 'setWinery:grape match: matching record set is blank - no action required'\n )\n else:\n print('setWinery:grape NONMATCH:', rec[fldWine])\n if debug:\n print('setWinery:liquor:', liquor, ':wine:', wine,\n ':grape:', grape, ':wgLookup[winery]:', wgLookup[\n winery])\n if debug:\n print('setWinery:grapematchset:', grapematchset)\n if vintage:\n newVintageLookupWine = rec[fldWine]\n for matchvalue in winematchset:\n if vintage in matchvalue:\n newVintageLookupWine = newVintageLookupWine.replace(\n matchvalue, '')\n if debug:\n print(\n 'setWinery:2nd-vintage:winematchset:wine-name-removal:'\n , matchvalue)\n for matchvalue in grapematchset:\n if vintage in matchvalue:\n newVintageLookupWine = newVintageLookupWine.replace(\n matchvalue, '')\n if debug:\n print(\n 'setWinery:2nd-vintage:grapematchset:wine-name-removal:'\n , matchvalue)\n if newVintageLookupWine != rec[fldWine]:\n if debug:\n print('setWinery:2nd-vintage:newVintageLookupWine:',\n newVintageLookupWine)\n newVintage = findVintage({fldWine: newVintageLookupWine},\n fldWine, debug=debug)\n if debug:\n print('setWinery:2nd-vintage:newVintage:', newVintage)\n vintage = newVintage\n wineDescr = ''\n if winery.startswith('z'):\n vintage = None\n if debug:\n print('setWinery:winery starts with z: clear vintage')\n if winematchset and ' '.join(winematchset) in wine:\n if debug:\n print('setWinery:clearing-winematchset:', winematchset,\n ':is-in-wine:', wine)\n winematchset = []\n if grapematchset and ' '.join(grapematchset) in grape:\n if not (len(grapematchset) == 1 and len(grapematchset[0]) == 1):\n if debug:\n print('setWinery:clearing-grapematchset:',\n grapematchset, ':is-in-grape:', grape)\n grapematchset = []\n if grapematchset and size and size in ' '.join(grapematchset):\n size = ''\n if winematchset and size and size in ' '.join(winematchset):\n size = ''\n if debug:\n print('setWinery:vallist1:', [winery, grape, wine] +\n grapematchset + winematchset + [vintage, size, qual, case])\n 
print('setWinery:vallist2:', [winery, grape, wine, *\n grapematchset, *winematchset, vintage, size, qual, case])\n wdList = []\n for val in ([winery, grape, wine] + grapematchset + winematchset +\n [vintage, size, qual, case]):\n if val:\n wdList.append(val)\n wineDescr = ' '.join(wdList)\n if False:\n if debug:\n print('setWinery:wdList:', wdList)\n if debug:\n print('setWinery:wineDescr:', wineDescr)\n if debug:\n try:\n print(':'.join(['setWinery:wineDescrList', wineDescr, rec[\n fldWineDescr], str(wineDescr == rec[fldWineDescr]), rec\n [fldWine]]))\n except Exception as e:\n print('debug error6-continuing:', str(e))\n print('fldWine:', fldWine)\n rec[fldWineDescrNew] = wineDescr\n if fldWineDescrMatch:\n rec[fldWineDescrMatch] = rec[fldWineDescr] == rec[fldWineDescrNew]\n\n\ndef setDigitFld2Value(wines, fld, value, debug=False):\n for rec in wines:\n if rec[fld].isdigit():\n rec[fld] = value\n\n\ndef updateFileOptionDictCheck(optiondict, wines, header, debug=False):\n if optiondict['fldWineDescr'] not in wines[0]:\n if debug:\n print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:'\n , optiondict['fldWineDescr'])\n if 'cnt' in wines[0]:\n print('setting values fldWineDescr and fldWineDescrNew to: cnt')\n optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'\n elif 'winedescr' in wines[0]:\n print(\n 'setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew'\n )\n optiondict['fldWineDescr'] = 'winedescr'\n optiondict['fldWineDescrNew'] = 'winedescrnew'\n else:\n print('could not find fldWineDescr in wines[0]-aborting:',\n optiondict['fldWineDescr'], '\\nwines[0]:', wines[0])\n error = wines[0][optiondict['fldWineDescr']]\n if False and optiondict['fldWineDescr'] == 'winedescr':\n if not optiondict['fldWineDescrMatch']:\n optiondict['fldWineDescrMatch'] = 'same'\n print('setting value fldWineDescrMatch to: same')\n if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:\n file_path, base_filename, file_ext = kvutil.filename_split(optiondict\n ['csvfile_update_in'])\n backupfile = kvutil.filename_proper(base_filename + optiondict[\n 'backupfile_ext'], file_path)\n print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)\n shutil.copyfile(optiondict['csvfile_update_in'], backupfile)\n if optiondict['fldWineDescrNew'] == 'cnt':\n optiondict['csvdictkeys'] = ['cnt', 'date', 'search', 'store',\n 'wine', 'winesrt']\n elif optiondict['fldWineDescrMatch']:\n optiondict['csvdictkeys'] = [optiondict['fldWineDescr'], optiondict\n ['fldWineDescrNew'], optiondict['fldWineDescrMatch'], *header]\n else:\n optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:\n ]\n print('updateFileOptionDictCheck:set csvdictkeys to:', optiondict[\n 'csvdictkeys'])\n\n\nif __name__ == '__main__':\n optiondict = kvutil.kv_parse_command_line(optiondictconfig, debug=False)\n ppFlag = optiondict['pprint']\n setOptionDictMasterFldValues(optiondict, debug=False)\n if optiondict['setup_check']:\n print('Running global variable check')\n globalVariableCheck(debug=optiondict['debug'])\n sys.exit()\n print('reading in master file:', optiondict['csvfile_master_in'])\n wines, header = kvcsv.readcsv2list_with_header(optiondict[\n 'csvfile_master_in'], headerlc=True)\n wgLookup = buildWineryGrapeLookup(wines, optiondict[\n 'fldWineDescrMaster'], optiondict['fldWineMaster'], debug=\n optiondict['debug'])\n if optiondict['csvfile_master_in'] != optiondict['csvfile_update_in']:\n print('reading in update file:', 
optiondict['csvfile_update_in'])\n        wines, header = kvcsv.readcsv2list_with_header(optiondict[\n            'csvfile_update_in'], headerlc=True)\n    if not wines:\n        print(\n            'wineset.py - no records read in - no work to be done - exiting'\n            )\n        sys.exit()\n    updateFileOptionDictCheck(optiondict, wines, header, debug=optiondict[\n        'debug'])\n    setWineryDescrFromWineryGrapeLookup(wgLookup, wines, optiondict[\n        'fldWineDescr'], optiondict['fldWine'], optiondict[\n        'fldWineDescrNew'], optiondict['fldWineDescrMatch'], debug=\n        optiondict['debug'])\n    if optiondict['defaultnew'] is not None:\n        print('Setting ', optiondict['fldWineDescrNew'], ' to ', optiondict\n            ['defaultnew'], 'if not set')\n        setDigitFld2Value(wines, optiondict['fldWineDescrNew'], optiondict[\n            'defaultnew'], debug=optiondict['debug'])\n    kvcsv.writelist2csv(optiondict['csvfile_update_out'], wines, optiondict\n        ['csvdictkeys'])\n    print('Saved results to:', optiondict['csvfile_update_out'])\n",
"step-5": "'''\r\n@author: Ken Venner\r\n@contact: [email protected]\r\n@version: 1.13\r\n\r\nRead in a file of wine names and create consistent wine descriptions \r\nfrom these names.\r\n\r\n'''\r\n\r\n\r\nimport kvutil\r\nimport kvcsv\r\n\r\nimport re\r\nimport sys\r\nimport shutil\r\n\r\n# may comment out in the future\r\nimport pprint\r\npp = pprint.PrettyPrinter(indent=4)\r\nppFlag = False\r\n\r\n# application variables\r\noptiondictconfig = {\r\n 'AppVersion' : {\r\n 'value' : '1.13',\r\n 'description' : 'defines the version number for the app',\r\n },\r\n 'debug' : {\r\n 'value' : False,\r\n 'type' : 'bool',\r\n 'description' : 'defines if we are running in debug mode',\r\n },\r\n 'verbose' : {\r\n 'value' : 1,\r\n 'type' : 'int',\r\n 'description' : 'defines the display level for print messages',\r\n },\r\n 'setup_check' : {\r\n 'value' : False,\r\n 'type' : 'bool',\r\n 'description' : 'defines if we checking out setup',\r\n },\r\n 'pprint' : {\r\n 'value' : False,\r\n 'type' : 'bool',\r\n 'description' : 'defines if we output with pretty print when debugging',\r\n },\r\n 'csvfile_master_in' : {\r\n 'value' : 'wine_xref.csv',\r\n 'description' : 'defines the name of the master data input file',\r\n },\r\n 'csvfile_update_in' : {\r\n 'value' : 'wineref.csv',\r\n 'description' : 'defines the name of the input file to updated',\r\n },\r\n 'csvfile_update_out' : {\r\n 'value' : 'wineref2.csv',\r\n 'description' : 'defines the name of the updated output file',\r\n },\r\n 'fldWine' : {\r\n 'value' : 'wine',\r\n 'description' : 'defines the name of the field that holds the Wine ',\r\n },\r\n 'fldWineDescr' : {\r\n 'value' : 'winedescr',\r\n 'description' : 'defines the name of the field holding the wine description',\r\n },\r\n 'fldWineDescrNew' : {\r\n 'value' : 'winedescrnew',\r\n 'description' : 'defines the name of the NEW field holding the new description ',\r\n },\r\n 'fldWineDescrMatch' : {\r\n 'value' : None,\r\n 'description' : 'defines the name of the NEW field holding the results of comparison existing to new description ',\r\n },\r\n 'fldWineMaster' : {\r\n 'value' : None,\r\n 'description' : 'defines the name of the field that holds the Wine when reading the master file ',\r\n },\r\n 'fldWineDescrMaster' : {\r\n 'value' : None,\r\n 'description' : 'defines the name of the field holding the wine description when reading the master file',\r\n },\r\n 'backupfile_ext' : {\r\n 'value' : '.bak',\r\n 'description' : 'defines the extension to use to copy the update input file to if we are replacing it with output',\r\n },\r\n 'defaultnew' : {\r\n 'value' : None,\r\n 'description' : 'defines if we should take field fldWineDescrNew and set to a value if not set',\r\n },\r\n}\r\n\r\n### GLOBAL VARIABLES / LOOKUPS ########################################\r\n\r\n# regex search for vintage in wine name\r\nvintageLookup = (\r\n re.compile('\\d\\d\\d\\d\\s+\\d\\d(\\d\\d)'), # two years together - get this one over early\r\n re.compile('^\\d\\d(\\d\\d)'), # four position start of line\r\n re.compile('\\s\\d\\d(\\d\\d)$'), # four position end of line\r\n re.compile('\\s\\d\\d(\\d\\d)\\s'), # four position middle of line\r\n re.compile('XX\\d\\d(\\d\\d)\\s'), # four position middle of line\r\n re.compile('\\s\\d\\d(\\d\\d)\\/'), # four position split\r\n re.compile('\\s\\'?(\\d\\d)\\'?$|\\s\\'?(\\d\\d)\\'?\\s'), # two position date with optional apostrophe front or back\r\n)\r\n\r\n# regex search for case in wine name\r\nreCase = 
re.compile(r'12\\s*X\\s*750\\s*ML|\\bcase\\b|12\\/750\\s*ML',re.IGNORECASE)\r\n\r\n# regex to pick up qualifiers from the wine\r\nreQualLookup = (\r\n (None, re.compile(r'\\bWithout\\s+Gift\\b|\\bNo\\s+Gift', re.IGNORECASE)), # the none gift do them first\r\n ('Gift', re.compile(r'\\bGift\\b', re.IGNORECASE)),\r\n ('VAP', re.compile(r'\\bVAP\\b', re.IGNORECASE)),\r\n ('VAP', re.compile(r'\\bGlassVAP\\b', re.IGNORECASE)),\r\n ('Glass', re.compile(r'\\bGlass\\b', re.IGNORECASE)),\r\n ('Glass', re.compile(r'\\bGlasses\\b', re.IGNORECASE)),\r\n ('Etch', re.compile(r'\\bEtch\\b', re.IGNORECASE)),\r\n ('Basket', re.compile(r'\\bBasket\\b', re.IGNORECASE)),\r\n)\r\n\r\n\r\n# regex search to define the size of the wine bottle\r\nsizeLookup = (\r\n ('1.75L', re.compile(r'\\b1\\.75\\s*Li?|\\b1\\.75$', re.IGNORECASE)),\r\n ('1.5L', re.compile(r'\\b1\\.5\\s*L?\\b|\\bMagnum\\b', re.IGNORECASE)),\r\n ('375mL', re.compile(r'Half\\s+Bottle|375ml', re.IGNORECASE)),\r\n ('200mL', re.compile(r'\\b200\\s*ML|\\(200\\s*ML', re.IGNORECASE)),\r\n ('50mL', re.compile(r'\\b50\\s*ML|\\(50\\s*ML', re.IGNORECASE)),\r\n ('500mL', re.compile(r'\\b500\\s*ML|\\(500\\s*ML', re.IGNORECASE)),\r\n ('3L', re.compile(r'\\b3\\s*Li?', re.IGNORECASE)),\r\n ('6L', re.compile(r'\\b6\\s*Li?', re.IGNORECASE)),\r\n ('9L', re.compile(r'\\b9\\s*Li?', re.IGNORECASE)),\r\n ('1L', re.compile(r'\\b1L\\b|\\b1\\s+L$|\\b1.0\\s*L\\b|\\b1\\s+Liter\\b|\\bOne\\s+Liter\\b|\\bLITER\\b|\\b1\\s*LTR', re.IGNORECASE)),\r\n)\r\n\r\n\r\n# regex extract winery names from the wine field\r\nwineryLookup = (\r\n ('Alban', re.compile(r'\\bAlban\\b', re.IGNORECASE)),\r\n ('Arrowood', re.compile(r'\\bArrowood\\b', re.IGNORECASE)),\r\n ('Atalon', re.compile(r'\\bAtalon\\b', re.IGNORECASE)),\r\n ('Attune', re.compile(r'\\bAttune\\b', re.IGNORECASE)),\r\n ('Auteur', re.compile(r'\\bAuteur\\b', re.IGNORECASE)),\r\n ('Austin Hope', re.compile(r'\\bAustin\\s+Hope\\b', re.IGNORECASE)),\r\n ('Badge', re.compile(r'\\bBadge\\b', re.IGNORECASE)),\r\n ('Balletto', re.compile(r'\\bBalletto\\b', re.IGNORECASE)),\r\n ('Bell', re.compile(r'\\bBell\\s+Cellar', re.IGNORECASE)),\r\n ('BR Cohn', re.compile(r'\\bB\\.?\\s?R\\.?\\s+Cohn\\b', re.IGNORECASE)),\r\n ('Bremer', re.compile(r'\\bBremer\\b', re.IGNORECASE)),\r\n ('Brewer-Clifton', re.compile(r'\\bBrewer[\\s\\-]Clifton\\b', re.IGNORECASE)),\r\n ('BV', re.compile(r'\\bBeaulieu\\s+V|\\bBV\\b', re.IGNORECASE)),\r\n ('Belle Glos', re.compile(r'\\bBelle\\s+Glos\\b', re.IGNORECASE)),\r\n ('Bennett Ln', re.compile(r'\\bBennet+\\sLane\\b', re.IGNORECASE)),\r\n ('Benovia', re.compile(r'\\bBenovia\\b', re.IGNORECASE)),\r\n ('Beringer', re.compile(r'\\bBeringer\\b', re.IGNORECASE)),\r\n ('Blackstone', re.compile(r'\\bBlackstone\\b', re.IGNORECASE)),\r\n ('Brancott', re.compile(r'\\bBrancott\\b', re.IGNORECASE)),\r\n ('Cade', re.compile(r'\\bCade\\b', re.IGNORECASE)),\r\n ('Cain Five', re.compile(r'\\bCain\\s+Five\\b|\\bCain\\s-\\sFive\\b|\\bCain\\s5\\b|\\bCainFive\\b', re.IGNORECASE)),\r\n ('Cakebread', re.compile(r'\\bCakebread\\b', re.IGNORECASE)),\r\n ('Cardinale', re.compile(r'\\bCardinale\\b', re.IGNORECASE)),\r\n ('Caymus', re.compile(r'\\bCaymus\\b', re.IGNORECASE)),\r\n ('Chappellet', re.compile(r'\\bChappellet\\b', re.IGNORECASE)),\r\n ('Chalk Hill', re.compile(r'\\bChalk\\s+Hill\\b', re.IGNORECASE)),\r\n ('Clos Du Bois', re.compile(r'\\bClos\\s+Du\\s+Bois\\b', re.IGNORECASE)),\r\n ('ClosDuVal', re.compile(r'\\bClos\\s+du\\s+Val\\b', re.IGNORECASE)),\r\n ('Colgin', re.compile(r'\\bColgin\\b', re.IGNORECASE)),\r\n ('Concha Don 
Melchor', re.compile(r'\\bConcha\\s.*Don\\s+Melchor\\b|Don\\s+Melchor\\b', re.IGNORECASE)),\r\n ('Continuum', re.compile(r'\\bContinuum\\b', re.IGNORECASE)),\r\n ('Corison', re.compile(r'\\bCorison\\b', re.IGNORECASE)),\r\n ('Cristal', re.compile(r'Roederer\\s?.*Cristal\\b|\\bCristal\\b.+Brut', re.IGNORECASE)),\r\n ('Curran', re.compile(r'\\bCurran\\b', re.IGNORECASE)),\r\n ('Darioush', re.compile(r'\\bDarioush\\b', re.IGNORECASE)),\r\n ('Darioush', re.compile(r'\\bCaravan\\b', re.IGNORECASE)),\r\n ('David Arthur', re.compile(r'\\bDavid\\s+Arthur\\b', re.IGNORECASE)),\r\n ('David Bruce', re.compile(r'\\bDavid\\s+Bruce\\b', re.IGNORECASE)),\r\n ('Davis Family', re.compile(r'\\bDavis\\s+Family\\b', re.IGNORECASE)),\r\n ('Del Dotto', re.compile(r'\\bDel\\s+Dotto\\b', re.IGNORECASE)),\r\n ('Dominus', re.compile(r'\\bDominus\\b', re.IGNORECASE)),\r\n ('Goldeneye', re.compile(r'\\bGoldeneye\\b', re.IGNORECASE)), # before duckhorn\r\n ('Paraduxx', re.compile(r'\\bParaduxx\\b', re.IGNORECASE)), # before duckhorn\r\n ('Domaine Carneros', re.compile(r'\\bDomaine\\s+Carneros\\b', re.IGNORECASE)),\r\n ('Dominus', re.compile(r'\\Dominus\\b', re.IGNORECASE)),\r\n ('Drappier', re.compile(r'\\bDrappier\\b', re.IGNORECASE)),\r\n ('Duckhorn', re.compile(r'\\bDuckhorn\\b', re.IGNORECASE)),\r\n ('Dumol', re.compile(r'\\bDumol\\b', re.IGNORECASE)),\r\n ('Dunn', re.compile(r'\\bDunn\\b', re.IGNORECASE)),\r\n ('Ehlers', re.compile(r'\\bEhlers\\b', re.IGNORECASE)),\r\n ('Etude', re.compile(r'\\bEtude\\b', re.IGNORECASE)),\r\n ('Far Niente', re.compile(r'\\bFar Niente\\b', re.IGNORECASE)),\r\n ('Flora', re.compile(r'\\bFlora\\s+Springs\\b', re.IGNORECASE)),\r\n ('Flowers', re.compile(r'\\bFlowers\\b', re.IGNORECASE)), \r\n ('Robert Foley', re.compile(r'\\bRobert\\s+\\bFoley\\b', re.IGNORECASE)), #before Foley\r\n ('Foley', re.compile(r'\\bFoley\\b', re.IGNORECASE)), \r\n ('Foxen', re.compile(r'\\bFoxen\\b', re.IGNORECASE)),\r\n ('Franciscan', re.compile(r'\\bFranciscan\\b', re.IGNORECASE)),\r\n ('Frank Family', re.compile(r'\\bFrank Family\\b', re.IGNORECASE)),\r\n ('Gary Farrell', re.compile(r'\\bGary\\s+Farrel+\\b', re.IGNORECASE)),\r\n ('Ghost Block', re.compile(r'\\bGhost\\s+Block\\b', re.IGNORECASE)),\r\n ('Grgich', re.compile(r'\\bGrgich\\b', re.IGNORECASE)),\r\n ('Groth', re.compile(r'\\bGroth\\b', re.IGNORECASE)),\r\n ('Gundlach', re.compile(r'\\bGundlach\\b', re.IGNORECASE)),\r\n ('Hansel', re.compile(r'\\bHansel\\b', re.IGNORECASE)),\r\n ('Hanzell', re.compile(r'\\bHanzell\\b', re.IGNORECASE)),\r\n ('Hess', re.compile(r'\\bHess\\b', re.IGNORECASE)),\r\n ('Hewitt', re.compile(r'\\bHewitt\\b', re.IGNORECASE)),\r\n ('Hobbs', re.compile(r'\\bHobbs\\b|\\bcrossbarn\\b', re.IGNORECASE)),\r\n ('Hundred Acre', re.compile(r'\\bHundred\\s+Acre\\b', re.IGNORECASE)),\r\n ('Jordan', re.compile(r'\\bJordan\\b', re.IGNORECASE)),\r\n ('Justin', re.compile(r'\\bJustin\\b', re.IGNORECASE)),\r\n ('Kim Crawford', re.compile(r'\\bKim\\s+Crawford\\b', re.IGNORECASE)),\r\n ('Kistler', re.compile(r'\\bKistler\\b', re.IGNORECASE)),\r\n ('Kosta', re.compile(r'\\bKosta\\s+Browne?\\b', re.IGNORECASE)),\r\n ('Krug', re.compile(r'\\bKrug\\b', re.IGNORECASE)),\r\n ('Kunde', re.compile(r'\\bKunde\\b', re.IGNORECASE)),\r\n ('LaCrema', re.compile(r'\\bLa\\s?Crema\\b', re.IGNORECASE)),\r\n ('Lewis', re.compile(r'\\bLewis\\b', re.IGNORECASE)),\r\n ('Lokoya', re.compile(r'\\bLokoya\\b', re.IGNORECASE)),\r\n ('Meiomi', re.compile(r'\\bMeiomi\\b', re.IGNORECASE)),\r\n ('Melville', re.compile(r'\\bMelville\\b', re.IGNORECASE)),\r\n ('Momento 
Mori', re.compile(r'\\bMomento\\s+Mori\\b', re.IGNORECASE)),\r\n ('Mondavi', re.compile(r'\\bMondavi\\b', re.IGNORECASE)),\r\n ('Montelena', re.compile(r'\\bMontelena\\b', re.IGNORECASE)),\r\n ('Mt Veeder', re.compile(r'^Mount\\s+Veeder\\b|^Mt\\.? Veeder\\b|\\d+\\s+M[^t]*t\\s+Veeder\\b', re.IGNORECASE)),\r\n ('Newton', re.compile(r'\\bNewton\\b', re.IGNORECASE)),\r\n ('Nickel', re.compile(r'\\bNickel\\b', re.IGNORECASE)),\r\n ('Opus One', re.compile(r'\\bOpus\\s+One\\b', re.IGNORECASE)),\r\n ('P Togni', re.compile(r'\\bTogni\\b', re.IGNORECASE)),\r\n ('Pahlmeyer Jayson', re.compile(r'\\bJayson\\b', re.IGNORECASE)), # this before pahlmeyer\r\n ('Pahlmeyer', re.compile(r'\\bPahlmeyer\\b(?!\\s*Jay)', re.IGNORECASE)),\r\n ('Papillon', re.compile(r'\\bPapillon\\b', re.IGNORECASE)),\r\n ('Patz', re.compile(r'\\bPatz\\b', re.IGNORECASE)),\r\n ('Phelps', re.compile(r'\\bPhelps\\b', re.IGNORECASE)),\r\n ('Plumpjack', re.compile(r'\\bPlumpjack\\b', re.IGNORECASE)),\r\n ('Pride', re.compile(r'\\bPride\\b', re.IGNORECASE)),\r\n ('Prisoner', re.compile(r'\\bPrisoner\\b', re.IGNORECASE)),\r\n ('Provenance', re.compile(r'\\bProvenance\\b', re.IGNORECASE)),\r\n ('R Sinskey', re.compile(r'\\bSinskey\\b', re.IGNORECASE)),\r\n ('Ramey', re.compile(r'\\bRamey\\b', re.IGNORECASE)),\r\n ('Revana', re.compile(r'\\bRevana\\b', re.IGNORECASE)),\r\n ('Raptor', re.compile(r'\\bRaptor\\s+Ridge\\b', re.IGNORECASE)),\r\n ('Revana', re.compile(r'\\bRevana\\b', re.IGNORECASE)),\r\n ('Ridge', re.compile(r'\\bRidge\\b', re.IGNORECASE)),\r\n ('Robert Foley', re.compile(r'\\bRobert\\s+Foley\\b', re.IGNORECASE)),\r\n ('Rombauer', re.compile(r'\\bRombauer\\b', re.IGNORECASE)),\r\n ('Rudd', re.compile(r'\\bRudd\\b', re.IGNORECASE)),\r\n ('Scarecrow', re.compile(r'\\bScarecrow\\b', re.IGNORECASE)),\r\n ('Sea Smoke', re.compile(r'\\bSea\\s+Smoke\\b', re.IGNORECASE)),\r\n ('Seghesio', re.compile(r'\\bSeghesio\\b', re.IGNORECASE)),\r\n ('Shafer', re.compile(r'\\bShafer\\b', re.IGNORECASE)),\r\n ('Sherwin', re.compile(r'\\bSherwin\\b', re.IGNORECASE)),\r\n ('Silver Oak', re.compile(r'\\bSilver\\s+Oak\\b', re.IGNORECASE)),\r\n ('Silverado', re.compile(r'\\bSilverado\\b', re.IGNORECASE)),\r\n ('Simi', re.compile(r'\\bSimi\\b', re.IGNORECASE)),\r\n ('Sonoma Cutrer', re.compile(r'\\bCutrer\\b', re.IGNORECASE)),\r\n ('Spottswoode', re.compile(r'\\bSpottswoode\\b', re.IGNORECASE)),\r\n ('Stag Leap', re.compile(r'\\bStag.*\\sLeap\\b', re.IGNORECASE)),\r\n ('Sullivan', re.compile(r'\\bSullivan\\b', re.IGNORECASE)),\r\n ('Summerland', re.compile(r'\\bSummerland\\b', re.IGNORECASE)),\r\n ('Summers', re.compile(r'\\bSummers\\b', re.IGNORECASE)),\r\n ('Tantara', re.compile(r'\\bTantara\\b', re.IGNORECASE)),\r\n ('Turnbull', re.compile(r'\\bTurnbull\\b', re.IGNORECASE)),\r\n ('Veuve', re.compile(r'\\bVeuve\\b', re.IGNORECASE)),\r\n ('Viader', re.compile(r'\\bViader\\b', re.IGNORECASE)),\r\n ('Waterstone', re.compile(r'\\bWaterstone\\b', re.IGNORECASE)),\r\n ('Whitehall', re.compile(r'\\bWhitehall\\b', re.IGNORECASE)),\r\n ('Wm Selyem', re.compile(r'\\bWilliams\\s*\\-?Selyem\\b', re.IGNORECASE)),\r\n ('ZD', re.compile(r'\\bZD\\b', re.IGNORECASE)),\r\n ('Zaca', re.compile(r'\\bZaca\\b', re.IGNORECASE)),\r\n\r\n \r\n ('zBourbon Woodford Res', re.compile(r'\\bWoodford\\s+Reserve\\b', re.IGNORECASE)),\r\n ('zBourbon Woodford Res', re.compile(r'\\bWoodford\\s+Rsv\\b', re.IGNORECASE)),\r\n ('zCognac Courvoisier', re.compile(r'\\bCourvoisier\\b', re.IGNORECASE)),\r\n ('zCognac Hennessy', re.compile(r'\\bHennesse?y\\b', re.IGNORECASE)),\r\n ('zCognac 
Remy', re.compile(r'\\bRemy\\s+Martin\\b|\\bRemy\\s+Louis', re.IGNORECASE)),\r\n ('zCointreau', re.compile(r'\\bCointreau\\b', re.IGNORECASE)),\r\n ('zGin Hendrick', re.compile(r'\\bHendrick', re.IGNORECASE)),\r\n ('zGin Tanqueray', re.compile(r'\\bTanqueray\\b', re.IGNORECASE)),\r\n ('zRum Mt Gay', re.compile(r'\\bMount\\s+Gay\\b|\\bMt\\s+Gay', re.IGNORECASE)),\r\n ('zRum Ron Zacapa', re.compile(r'\\bRon\\s+Zacapa\\b', re.IGNORECASE)),\r\n ('zRye Hayden', re.compile(r'\\bBasil\\s+Hayden\\b', re.IGNORECASE)),\r\n ('zSambuca', re.compile(r'\\bSambuca\\b', re.IGNORECASE)),\r\n ('zScotch Glenmorangie', re.compile(r'\\bGlenmorangie\\b', re.IGNORECASE)),\r\n ('zScotch Hibiki Harmony', re.compile(r'\\bHibiki\\s.*Harmony\\b', re.IGNORECASE)),\r\n ('zScotch Hibiki', re.compile(r'\\bHibiki\\b(?!\\s*Har)', re.IGNORECASE)),\r\n ('zScotch Macallan', re.compile(r'\\bMacallan\\b', re.IGNORECASE)),\r\n ('zTeq Campo Azul', re.compile(r'\\bCampo\\s+Azul\\b', re.IGNORECASE)),\r\n ('zTeq Casamigos', re.compile(r'\\bCasamigos\\b', re.IGNORECASE)),\r\n ('zTeq Casino Azul', re.compile(r'\\bCasino\\s+Azul\\b', re.IGNORECASE)),\r\n ('zTeq Clase Azul', re.compile(r'\\bClase\\s+Azul\\b', re.IGNORECASE)),\r\n ('zTeq Cuervo', re.compile(r'\\bJose\\s+Cuervo\\b|^Cuervo\\b', re.IGNORECASE)),\r\n ('zTeq Don Julio', re.compile(r'\\bDon\\s+Julio\\b', re.IGNORECASE)),\r\n ('zTeq Dos Artes', re.compile(r'\\bDos\\s+Artes\\b|^Cuervo\\b', re.IGNORECASE)),\r\n ('zTeq Gran Cava', re.compile(r'\\bGran\\s+Cava\\b', re.IGNORECASE)),\r\n ('zTeq Herradura', re.compile(r'\\bHerradura\\b', re.IGNORECASE)),\r\n ('zTeq Loma Azul', re.compile(r'\\bLoma\\s+Azul\\b', re.IGNORECASE)),\r\n ('zTeq Padre Azul', re.compile(r'\\bPadre\\s+Azul\\b', re.IGNORECASE)),\r\n ('zTeq Partida', re.compile(r'\\bPartida\\b', re.IGNORECASE)),\r\n ('zTeq Patron', re.compile(r'\\bPatron\\b', re.IGNORECASE)),\r\n ('zTripleSec Gr Marnier', re.compile(r'\\bGrand\\s+Marnier\\b', re.IGNORECASE)),\r\n ('zTripleSec Dekuyper', re.compile(r'\\bDekuyper\\b', re.IGNORECASE)),\r\n ('zTripleSec Hiram', re.compile(r'\\bHiram\\b', re.IGNORECASE)),\r\n ('zVodka Absolut', re.compile(r'\\bAbsolut\\b', re.IGNORECASE)),\r\n ('zVodka Skyy', re.compile(r'\\bSkyy\\b', re.IGNORECASE)),\r\n ('zVodka Tito', re.compile(r'\\bTito', re.IGNORECASE)),\r\n ('zWhiskey Balvenie', re.compile(r'\\bBalvenie\\b', re.IGNORECASE)),\r\n ('zWhiskey J Walker', re.compile(r'\\bJohn+ie\\s+Walker\\b', re.IGNORECASE)),\r\n# ('', re.compile(r'\\b\\b', re.IGNORECASE)),\r\n)\r\n\r\n# regex extract the grape from the wine fld\r\ngrapeLookup = (\r\n ('Cab Franc', re.compile(r'\\bCabernet\\s+Franc|\\bCab\\s+Franc', re.IGNORECASE)), # before cab\r\n ('Cab', re.compile(r'\\bCabernet\\b|\\sCS\\s|\\sCS$|\\bCab\\b', re.IGNORECASE)),\r\n ('Claret', re.compile(r'\\bClaret\\b', re.IGNORECASE)),\r\n ('Rose Pinot', re.compile(r'\\bRose\\b.*\\bPinot\\b|\\bPinot\\b.*\\bRose\\b', re.IGNORECASE)),\r\n ('Pinot', re.compile(r'\\bPinot\\b|\\bPN\\b|\\bP\\s+Noir\\b', re.IGNORECASE)),\r\n ('Merlot', re.compile(r'\\bMerlot\\b|\\bME\\b', re.IGNORECASE)),\r\n ('Sauv Blanc', re.compile(r'\\bSauvignon\\s+Blanc\\b|\\bSB\\b', re.IGNORECASE)),\r\n ('Sauv Blanc', re.compile(r'\\bSauvignon\\/Fume\\s+Blanc\\b', re.IGNORECASE)),\r\n ('Meritage', re.compile(r'\\bMeritage\\b', re.IGNORECASE)),\r\n ('Fume', re.compile(r'\\bFume\\b|\\bFumé', re.IGNORECASE)),\r\n ('Champagne', re.compile(r'\\bChampagne\\b', re.IGNORECASE)),\r\n ('Chard', re.compile(r'\\bChar+d|\\bCH\\b', re.IGNORECASE)),\r\n ('Shiraz', re.compile(r'\\bShiraz\\b', re.IGNORECASE)),\r\n 
('Syrah', re.compile(r'\\bSyrah\\b|\\bSY\\b',re.IGNORECASE)),\r\n ('Zin', re.compile(r'\\bZinfandel\\b|\\bZIN\\b|\\bZN\\b', re.IGNORECASE)),\r\n ('Rose', re.compile(r'\\bRose\\b|\\bRosé', re.IGNORECASE)),\r\n ('Sangiovese', re.compile(r'\\Sangiovese\\b', re.IGNORECASE)),\r\n# ('Brandy', re.compile(r'\\bBrandy\\b', re.IGNORECASE)),\r\n ('Gewurzt', re.compile(r'\\bGew.rztraminer\\b|\\bGewürzt', re.IGNORECASE)),\r\n ('Malbec', re.compile(r'\\bMalbec\\b', re.IGNORECASE)),\r\n ('Viognier', re.compile(r'\\bViognier\\b', re.IGNORECASE)),\r\n ('Roussanne', re.compile(r'\\bRoussanne\\b', re.IGNORECASE)),\r\n ('Charbono', re.compile(r'\\bCharbono\\b', re.IGNORECASE)),\r\n ('PSirah', re.compile(r'\\bPetite Sirah\\b', re.IGNORECASE)),\r\n ('Cuvee', re.compile(r'\\bCuvee\\b', re.IGNORECASE)),\r\n ('Red', re.compile(r'\\bRed\\b|\\bBordeaux\\s+Blend\\b', re.IGNORECASE)),\r\n ('Syrah-Cab', re.compile(r'\\bSyrcab\\b|\\bsyrah[-\\s\\/]+cab', re.IGNORECASE)),\r\n ('Grenache', re.compile(r'\\bGrenache\\b', re.IGNORECASE)), \r\n ('Tempranillo', re.compile(r'\\bTempranillo\\b', re.IGNORECASE)),\r\n)\r\n\r\n# wineries that we don't want to look up the grape on\r\nignoreGrapeLookup = {\r\n 'Cristal' : ['Rose', None],\r\n 'Domaine Carneros' : ['Brut', None],\r\n 'Dominus' : [None],\r\n 'Papillon' : None,\r\n 'Paraduxx' : None,\r\n 'Veuve' : None,\r\n 'zCointreau' : None,\r\n 'zGin Hendrick' : None,\r\n 'zGin Tanqueray' : ['Ten', None],\r\n 'zTripleSec Gr Marnier' : ['1880', '100th', 'Cent', 'Quin', None],\r\n 'zTripleSec Dekuyper' : None,\r\n 'zTripleSec Hiram' : None,\r\n 'zVodka Skyy' : ['Citrus', None],\r\n 'zVodka Tito' : None,\r\n# 'Prisoner' : ['Cuttings', 'Red', 'Derange', 'Saldo', 'Blindfold', None],\r\n}\r\n\r\n# winery to wine lookup when no grape is found in the wine name\r\n#\r\n# extract the wine name from a winery - when a field does not have a grape lookup for the row\r\n# the name looked up and found will be the name used\r\nnoGrapeLookup = {\r\n 'Ehlers' : ['120-80'], # matches an abbreviations - and matches fldWineDescr\r\n 'Alban' : ['Pandora'],\r\n 'BV' : ['Tapestry', 'Latour'],\r\n 'Bennett Ln' : ['Maximus'],\r\n 'Bremer' : ['Austintatious'],\r\n 'Cain Five' : None,\r\n 'Colgin' : ['Cariad', 'IX'],\r\n 'Concha Don Melchor' : None,\r\n 'Continuum' : None,\r\n 'Darioush' : ['Duel', 'Darius'],\r\n 'Duckhorn' : ['Discussion'],\r\n 'Far Niente' : ['Dolce'],\r\n 'Flora' : ['Trilogy'],\r\n 'Franciscan' : ['Magnificat'],\r\n 'Grgich' : ['Violetta'],\r\n 'Gundlach' : ['Vintage Reserve'],\r\n 'Justin' : ['Isosceles'],\r\n 'Krug' : ['Generations'],\r\n 'Mondavi' : ['Maestro'],\r\n 'Newton' : ['Puzzle'],\r\n 'Opus One' : None,\r\n 'Phelps' : ['Insignia'],\r\n 'Prisoner' : ['Cuttings', 'Derange', 'Saldo', 'Blindfold'],\r\n 'Ridge' : ['Monte Bello'],\r\n 'Robert Foley' : ['Griffin'],\r\n 'Sullivan' : ['Coeur de Vigne'],\r\n 'Zaca' : ['ZThree', 'ZCuvee'],\r\n 'zCognac Courvoisier' : ['Napolean', 'VS', 'VSOP', 'XO'],\r\n 'zCognac Hennessy' : ['Paradis', 'Richard', 'VS', 'VSOP', 'XO', 'Master'],\r\n 'zCognac Remy' : ['1738', 'Louis XIII', 'VSOP', 'XO', 'VS'],\r\n 'zRum Ron Zacapa' : ['23', 'Negra', 'XO'],\r\n 'zRye Hayden' : ['Dark', 'Caribbean'],\r\n 'zScotch Hibiki Harmony' : None,\r\n# 'zScotch Hibiki' : ['Toki', '12', '17', '21', '30'],\r\n 'zTeq Campo Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],\r\n 'zTeq Casamigos' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],\r\n 'zTeq Casino Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado', 'Silver'],\r\n 'zTeq Clase Azul' : ['Ultra', 'Extra Anejo', 
'Anejo', 'Blanco', 'Reposado', 'Mezcal', 'Plata', 'Platino'],\r\n 'zTeq Dos Artes' : ['Extra Anejo'],\r\n 'zTeq Gran Cava' : ['Extra Anejo'],\r\n 'zTeq Loma Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],\r\n# 'zTeq Padre Azul' : ['Extra Anejo', 'Anejo', 'Blanco', 'Reposado'],\r\n 'zTeq Partida' : ['Blanco', 'Elegante'],\r\n 'zVodka Absolut' : ['Citron', 'Mandarin', 'Mandrin', 'Mango', 'Ruby', 'Vanilia', 'Raspberri', 'Grapevine', None],\r\n 'zWhiskey J Walker' : ['Double Black', 'Black', 'Blue', 'Gold', 'Green', 'Platinum', 'Red','Swing', 'White', '18', '21'],\r\n}\r\n\r\n\r\n# regex to use to determine if this is a liquor not a wine\r\n#\r\n# winery -> [ liquor, regex ]\r\n# if there is no grape, and no noGrapeLookup found, but the winery has a liquorLookup\r\n# use the list of lookups to find the additional infomratoin to add to the winery\r\n#\r\nliquorLookup = {\r\n 'zRum Mt Gay' : [\r\n ('1703 Mst', re.compile(r'\\b1703\\b', re.IGNORECASE)),\r\n ('BB', re.compile(r'\\bBlack Barrel\\b', re.IGNORECASE)),\r\n ('Eclipse Silver', re.compile(r'\\bEclipse\\s+Silver\\b', re.IGNORECASE)),\r\n ('Eclipse', re.compile(r'\\bEclipse\\b', re.IGNORECASE)),\r\n ('Old Peat', re.compile(r'\\bOld Peat', re.IGNORECASE)),\r\n ('Old Pot', re.compile(r'\\bPot\\s+Still\\b', re.IGNORECASE)),\r\n ('Old', re.compile(r'\\bOld\\b', re.IGNORECASE)),\r\n ('Silver', re.compile(r'\\bSilver\\b', re.IGNORECASE)),\r\n ('XO Peat', re.compile(r'\\bXO\\b', re.IGNORECASE)),\r\n ],\r\n 'zScotch Glenmorangie' : [\r\n ('10', re.compile(r'\\b10(YR)?\\b', re.IGNORECASE)),\r\n ('14 Port', re.compile(r'14.+\\bQuinta\\b|14.+\\bPort\\b|\\bQuinta\\b.+14|\\bPort\\b.+14', re.IGNORECASE)),\r\n ('12 Bacalta', re.compile(r'\\bBacalta\\b', re.IGNORECASE)),\r\n ('12 Burgundy', re.compile(r'\\bBurgundy\\b', re.IGNORECASE)),\r\n ('12 Nectar', re.compile(r'\\bNectar\\b', re.IGNORECASE)),\r\n ('12 Port', re.compile(r'\\bQuinta\\b|\\bPort\\b', re.IGNORECASE)),\r\n ('12 Sherry', re.compile(r'\\bLa\\s?Santa\\b|\\bSherry\\b', re.IGNORECASE)),\r\n ('12 Signet', re.compile(r'\\bSignet\\b', re.IGNORECASE)),\r\n ('15 Cadboll', re.compile(r'\\bCadboll', re.IGNORECASE)),\r\n ('15', re.compile(r'\\b15(YR)?\\b', re.IGNORECASE)),\r\n ('18', re.compile(r'\\b18(YR)?\\b|\\b18YEAR\\b', re.IGNORECASE)),\r\n ('25 Astar', re.compile(r'\\bAstar\\b', re.IGNORECASE)),\r\n ('25', re.compile(r'\\b25(YR)?\\b', re.IGNORECASE)),\r\n ('Companta', re.compile(r'\\bCompanta\\b', re.IGNORECASE)),\r\n ('Finealta', re.compile(r'\\bFinealta\\b', re.IGNORECASE)),\r\n ('Milsean', re.compile(r'\\bMilsean\\b', re.IGNORECASE)),\r\n ('Sonnalta', re.compile(r'\\bSonnalta\\b', re.IGNORECASE)),\r\n ],\r\n 'zScotch Macallan' : [\r\n ('10 Fine', re.compile(r'\\bFine.*\\b10\\b|\\b10.*Fine')),\r\n ('10', re.compile(r'\\b10\\b')),\r\n ('12 Double Gold', re.compile(r'\\bDbl\\b.*Gold|\\bDouble\\b.*Gold', re.IGNORECASE)),\r\n ('12 Double', re.compile(r'\\bDouble\\s.*12(YR)?\\b', re.IGNORECASE)),\r\n ('12 Double', re.compile(r'\\b12\\s.*Double\\b', re.IGNORECASE)),\r\n ('12 Double', re.compile(r'\\bDbl\\b|\\bDouble\\b', re.IGNORECASE)),\r\n ('12 Edition 1', re.compile(r'\\bEdition\\s.*1\\b', re.IGNORECASE)),\r\n ('12 Edition 2', re.compile(r'\\bEdition\\s.*2\\b', re.IGNORECASE)),\r\n ('12 Edition 3', re.compile(r'\\bEdition\\s.*3\\b', re.IGNORECASE)),\r\n ('12 Edition 4', re.compile(r'\\bEdition\\s.*4\\b', re.IGNORECASE)),\r\n ('12 Sherry', re.compile(r'\\b12\\s.*Sherry\\b|\\bSherry\\b\\s.*\\b12', re.IGNORECASE)),\r\n ('12 Triple', re.compile(r'\\b12(YR)?\\s.*Triple\\b', 
re.IGNORECASE)),\r\n ('12 Triple', re.compile(r'\\bTriple\\s.*12\\b', re.IGNORECASE)),\r\n ('12', re.compile(r'\\b12(YR)?\\b', re.IGNORECASE)),\r\n ('15 Triple', re.compile(r'\\b15(YR)?\\s.*Triple\\b|Triple.+\\b15(YR)?\\b', re.IGNORECASE)),\r\n ('15 Fine', re.compile(r'\\b15(YR)?\\b.*\\bFine\\b', re.IGNORECASE)),\r\n ('15', re.compile(r'\\b15(YR)?\\b', re.IGNORECASE)),\r\n ('17 Sherry', re.compile(r'\\b17(YR)?\\s.*Sherry\\b', re.IGNORECASE)),\r\n ('17 Fine', re.compile(r'\\b17(YR)?\\b.*\\bFine\\b', re.IGNORECASE)),\r\n ('17', re.compile(r'\\b17(YR)?\\b', re.IGNORECASE)),\r\n ('18 Sherry', re.compile(r'\\b18(YR)?\\s.*Sherry\\b|Sherry\\b.*18', re.IGNORECASE)),\r\n ('18 Triple', re.compile(r'\\b18(YR)?\\s.*Triple\\b|Triple.+\\b18(YR)?\\b', re.IGNORECASE)),\r\n ('18 Fine', re.compile(r'\\b18(YR)?\\b.*\\bFine\\b', re.IGNORECASE)),\r\n ('18 Gran', re.compile(r'Gran\\b.*\\b18', re.IGNORECASE)),\r\n ('18', re.compile(r'\\b18(YR)?\\b', re.IGNORECASE)),\r\n ('21 Fine', re.compile(r'\\b21.*Fine\\b', re.IGNORECASE)),\r\n ('21', re.compile(r'\\b21(YR)?\\b', re.IGNORECASE)),\r\n ('25 Sherry', re.compile(r'\\b25\\s.*Sherry\\b', re.IGNORECASE)),\r\n ('25', re.compile(r'\\b25(YR)?\\b')),\r\n ('30 Sherry', re.compile(r'\\b30\\s.*Sherry', re.IGNORECASE)),\r\n ('30 Triple', re.compile(r'\\b30(YR)?\\s.*Triple\\b|Triple.+\\b30(YR)?\\b', re.IGNORECASE)),\r\n ('30 Fine', re.compile(r'\\b30(YR)?\\b.*\\bFine\\b|Fine.*30', re.IGNORECASE)),\r\n ('30', re.compile(r'\\b30(YR)?\\b')),\r\n ('Rare', re.compile(r'\\bRare\\b', re.IGNORECASE)),\r\n ],\r\n 'zTeq Cuervo' : [\r\n ('Especial Gold', re.compile(r'\\bEspecial\\b.*Gold\\b|Gold.*Especial', re.IGNORECASE)),\r\n ('Especial Blue', re.compile(r'\\bEspecial\\b.*Blue\\b', re.IGNORECASE)),\r\n ('Especial', re.compile(r'\\bEspecial\\b', re.IGNORECASE)),\r\n ('Familia Platino', re.compile(r'\\bPlatino\\b', re.IGNORECASE)),\r\n ('Familia Anejo', re.compile(r'\\bFamilia\\b|\\bReserva\\b', re.IGNORECASE)),\r\n ('Gold', re.compile(r'\\bGold\\b', re.IGNORECASE)),\r\n ('Reposado Lagavulin', re.compile(r'\\bReposado.*Lagavulin', re.IGNORECASE)),\r\n ('Tradicional Anejo', re.compile(r'Tradicional.*Anejo|Anejo.*Tradicional', re.IGNORECASE)),\r\n ('Tradicional Reposado', re.compile(r'Tradicional.*Reposado|Reposado.*Tradicional', re.IGNORECASE)),\r\n ('Tradicional Silver', re.compile(r'\\bTradicional\\b', re.IGNORECASE)),\r\n ('Tradicional Silver', re.compile(r'\\bTraditional\\b', re.IGNORECASE)),\r\n ('Reposado', re.compile(r'\\bReposado\\b', re.IGNORECASE)),\r\n ('Silver', re.compile(r'\\bSilver\\b', re.IGNORECASE)),\r\n ],\r\n 'zTeq Don Julio' : [\r\n ('1942', re.compile(r'\\b1942\\b', re.IGNORECASE)),\r\n ('Real', re.compile(r'\\bReal\\b', re.IGNORECASE)),\r\n ('Anejo Claro 70th', re.compile(r'\\b70th\\b', re.IGNORECASE)),\r\n ('Anejo Claro', re.compile(r'\\bAnejo\\b\\s*Claro\\b', re.IGNORECASE)),\r\n ('Anejo', re.compile(r'\\bAnejo\\b', re.IGNORECASE)),\r\n ('Blanco', re.compile(r'\\bBlanco\\b', re.IGNORECASE)),\r\n ('Reposado Lagavulin', re.compile(r'\\bRepo.+Lagvulin\\b', re.IGNORECASE)),\r\n ('Reposado Dbl', re.compile(r'\\bReposado.+Double\\b', re.IGNORECASE)),\r\n ('Reposado Dbl', re.compile(r'\\bReposado.+Dbl\\b', re.IGNORECASE)),\r\n ('Reposado Dbl', re.compile(r'\\bDouble.+Reposado\\b', re.IGNORECASE)),\r\n ('Reposado Private', re.compile(r'\\bReposado.+Private\\b', re.IGNORECASE)),\r\n ('Reposado', re.compile(r'\\bReposado\\b', re.IGNORECASE)),\r\n ('Silver', re.compile(r'\\bSilver\\b', re.IGNORECASE)),\r\n ],\r\n 'zTeq Herradura' : [\r\n ('Ultra', 
re.compile(r'\\bUltra\\b', re.IGNORECASE)),\r\n ('Suprema', re.compile(r'\\bSuprema\\b', re.IGNORECASE)),\r\n ('Anejo', re.compile(r'\\bAnejo\\b', re.IGNORECASE)),\r\n ('Blanco', re.compile(r'\\bBlanco\\b', re.IGNORECASE)),\r\n ('Reposado Gold', re.compile(r'\\bReposado\\s+Gold\\b|\\bGold\\s+Reposado\\b', re.IGNORECASE)),\r\n ('Reposado Scotch', re.compile(r'\\bReposado.+Scotch\\b|\\bScotch.+Reposado\\b', re.IGNORECASE)),\r\n ('Reposado Port', re.compile(r'\\bPort.+Reposado\\b|\\bReposado.+Port\\b', re.IGNORECASE)),\r\n ('Reposado', re.compile(r'\\bReposado\\b', re.IGNORECASE)),\r\n ('Silver', re.compile(r'\\bSilver\\b', re.IGNORECASE)),\r\n ],\r\n 'zTeq Patron' : [\r\n ('Gran Piedra', re.compile(r'\\bPiedra\\b', re.IGNORECASE)),\r\n ('DELETE Roca DELETE', re.compile(r'\\bRoca\\b', re.IGNORECASE)),\r\n ('Anejo Extra Lalique', re.compile(r'\\bLalique\\b', re.IGNORECASE)),\r\n ('Anejo Extra 7yr', re.compile(r'\\b7YR\\b|\\b7 anos\\b|\\b7 year\\b', re.IGNORECASE)),\r\n ('Anejo Extra 5yr', re.compile(r'\\b5YR\\b|\\b5 anos\\b|\\b5 year\\b', re.IGNORECASE)),\r\n ('Anejo Extra 10yr', re.compile(r'\\b10\\b.+\\bExtra\\b|\\bExtra\\b.+10', re.IGNORECASE)),\r\n ('Anejo Extra', re.compile(r'\\bExtra\\s+Anejo\\b', re.IGNORECASE)),\r\n ('Gran Anejo', re.compile(r'\\bGran\\s+Anejo\\b', re.IGNORECASE)),\r\n ('Gran Anejo', re.compile(r'\\bBurdeos\\b', re.IGNORECASE)),\r\n ('Gran Smoky', re.compile(r'\\bGran\\s+.*Smoky\\b', re.IGNORECASE)),\r\n ('Anejo', re.compile(r'\\bAnejo\\b', re.IGNORECASE)),\r\n ('Gran Platinum', re.compile(r'\\bPlatinum\\b', re.IGNORECASE)),\r\n ('Reposado', re.compile(r'\\bReposado\\b', re.IGNORECASE)),\r\n ('Silver LTD', re.compile(r'\\bSilver.*Limited\\b|\\bLimited.*Silver\\b', re.IGNORECASE)),\r\n ('Silver Estate', re.compile(r'\\bEstate.*Silver\\b|\\bSilver.*Estate\\b', re.IGNORECASE)),\r\n ('Silver', re.compile(r'\\bSilver\\b', re.IGNORECASE)),\r\n ('Blanco', re.compile(r'\\bBlanco\\b', re.IGNORECASE)),\r\n# ('', re.compile(r'\\b\\b', re.IGNORECASE)),\r\n ],\r\n 'zTeq Padre Azul' : [\r\n ('Blanco', re.compile(r'\\bsilver\\b', re.IGNORECASE)),\r\n ],\r\n 'zWhiskey Balvenie' : [\r\n ('12 Double', re.compile(r'\\bDouble.*12(YR)?\\b', re.IGNORECASE)),\r\n ('12 Double', re.compile(r'\\b12(YR)?\\s.*Double', re.IGNORECASE)),\r\n ('12 First', re.compile(r'\\b12(YR)?\\s.*First', re.IGNORECASE)),\r\n ('12 USA', re.compile(r'\\b12.*American|American.*12', re.IGNORECASE)),\r\n ('12 Toast', re.compile(r'\\b12(YR)?\\s.*Toast', re.IGNORECASE)),\r\n ('12', re.compile(r'\\b12(YR)?\\b', re.IGNORECASE)),\r\n ('14 Carib', re.compile(r'\\b14(YR)?\\s.*Carib', re.IGNORECASE)),\r\n ('14 Carib', re.compile(r'\\b14(YR)?\\s.*CB\\s+Cask', re.IGNORECASE)),\r\n ('14 Carib', re.compile(r'\\bCarr?ib', re.IGNORECASE)),\r\n ('14 Peat', re.compile(r'\\b14(YR)?\\s.*Peat', re.IGNORECASE)),\r\n ('15 Sherry', re.compile(r'\\b15(YR)?\\s.*Sherry\\b', re.IGNORECASE)),\r\n ('15 Sherry', re.compile(r'\\bSherry\\s+.*15(YR)?\\b', re.IGNORECASE)),\r\n ('15', re.compile(r'\\b15(YR)?\\b', re.IGNORECASE)),\r\n ('16 Triple', re.compile(r'\\b16(YR)?\\s.*Triple\\b', re.IGNORECASE)),\r\n ('17 Sherry Double', re.compile(r'\\b17(YR)?\\s.*Sherry\\s+Doub', re.IGNORECASE)),\r\n ('17 Sherry', re.compile(r'\\b17(YR)?\\s.*Sherry', re.IGNORECASE)),\r\n ('17 Double', re.compile(r'\\b17(YR)?\\s.*Double', re.IGNORECASE)),\r\n ('17 Double', re.compile(r'\\bDouble.*17(YR)?\\b', re.IGNORECASE)),\r\n# 17 Double Sherry\r\n# 17 Islay\r\n# 17 New Oak\r\n ('17 Peat', re.compile(r'\\b17(YR)?\\s.*Peat', re.IGNORECASE)),\r\n ('17 Peat', 
re.compile(r'\\bPeat.*17(YR)?\\b', re.IGNORECASE)),\r\n ('17', re.compile(r'\\b17(YR)?\\b', re.IGNORECASE)),\r\n ('21 Port', re.compile(r'\\b21.*Port', re.IGNORECASE)),\r\n ('21 Port', re.compile(r'\\bPort.*21\\b', re.IGNORECASE)),\r\n ('21', re.compile(r'21', re.IGNORECASE)),\r\n ('25', re.compile(r'\\b25(YR)?\\b', re.IGNORECASE)),\r\n ('30', re.compile(r'\\b30(YR)?\\b', re.IGNORECASE)),\r\n ('40', re.compile(r'\\b40(YR)?\\b', re.IGNORECASE)),\r\n ],\r\n 'zBourbon Woodford Res' : [\r\n ('Dbl', re.compile(r'\\bDouble\\b', re.IGNORECASE)),\r\n ('Derby', re.compile(r'\\bDerby\\b', re.IGNORECASE)),\r\n ('Rye Choc', re.compile(r'\\bChocolate.*Rye\\b', re.IGNORECASE)),\r\n ('Rye', re.compile(r'\\bRye\\b', re.IGNORECASE)),\r\n ('Brandy', re.compile(r'\\bBrandy\\b', re.IGNORECASE)),\r\n ('Batch', re.compile(r'\\bBatch\\b', re.IGNORECASE)),\r\n ('Barrel', re.compile(r'\\bBarrel\\b', re.IGNORECASE)),\r\n ('Master', re.compile(r'\\bMasters?\\b', re.IGNORECASE)),\r\n ('Malt', re.compile(r'\\bMalt\\b', re.IGNORECASE)),\r\n ('Maple', re.compile(r'\\bMaple\\b', re.IGNORECASE)),\r\n ('Wheat', re.compile(r'\\bWheat\\b', re.IGNORECASE)),\r\n ('', re.compile(r'\\bWoodford\\b', re.IGNORECASE)),\r\n ],\r\n 'zSambuca' : [\r\n ('Romana Black', re.compile(r'\\bRomana.*\\bBlack\\b|\\bBlack\\s+Romana\\b', re.IGNORECASE)),\r\n ('Romana', re.compile(r'\\bRomana\\b', re.IGNORECASE)),\r\n ('Di Amore', re.compile(r'\\bdi Amore\\b', re.IGNORECASE)),\r\n ],\r\n 'zScotch Hibiki' : [\r\n ('12', re.compile(r'\\b12\\s*YE?A?R\\b', re.IGNORECASE)),\r\n ('17 Limited', re.compile(r'\\b17\\s*YE?A?R\\b.+Limited', re.IGNORECASE)),\r\n ('17', re.compile(r'\\b17\\s*YE?A?R\\b', re.IGNORECASE)),\r\n ('21 Limited', re.compile(r'\\b21\\s*YE?A?R\\b.+Limited', re.IGNORECASE)),\r\n ('21', re.compile(r'\\b21\\s*YE?A?R\\b', re.IGNORECASE)),\r\n ('30', re.compile(r'\\b30\\s*YE?A?R\\b', re.IGNORECASE)),\r\n ]\r\n}\r\n# regex to expand out optional values in the optoinal values to find a match against wine fld\r\nwineAbbrLookup = {\r\n '120-80' : r'\\bOne\\s+Twenty\\s+Over\\s+Eighty\\b',\r\n '3Amigos' : r'\\bThree\\s+Amigos\\b',\r\n '3Palms' : r'\\bThree\\s+Palms\\b',\r\n '3Sister' : r'\\bThree\\s+Sisters?\\b',\r\n '4Barrell' : r'\\b4[\\-\\s]Barrels?\\b',\r\n 'Alex' : r'\\bAlexander\\b',\r\n 'And' : r'\\bAnderson\\b',\r\n 'Car' : r'\\bCarneros\\b',\r\n 'Carries' : r'\\bCarrie',\r\n 'CC' : r'\\bC\\.?C\\.?\\s+Ranch\\b',\r\n 'Clone4' : r'\\bClone\\s+4\\b',\r\n 'Clone6' : r'\\bClone\\s+6\\b',\r\n 'Crossbarn' : r'\\bCross\\s+Barn\\b',\r\n 'Donna' : r'\\bDonna',\r\n 'Est' : r'\\bEstate\\b',\r\n 'Estate' : r'\\bEst\\b',\r\n 'Gap' : r'\\bGap|\\s%27Gap',\r\n 'Gary' : r'\\bGary',\r\n 'Julia' : r'\\bJulia',\r\n 'Knights' : r'\\bKnight',\r\n 'KistlerVnyd' : r'\\bKistler (Vineyard|VYD|EST)\\b',\r\n 'LP' : r'\\bLes Pierres\\b',\r\n 'Lyn' : r'\\bLyndenhur?st\\b',\r\n 'Mont' : r'\\bMonterey\\b',\r\n 'Mt' : r'\\bMount\\b|\\bMt\\.\\b',\r\n 'Napa/Son' : r'\\bNapa.*Son',\r\n 'Oak' : r'\\bOakville\\b',\r\n 'One-Pt-5' : r'\\bOne\\s+Point\\s+Five\\b',\r\n 'Pomm' : r'\\bPommeraie\\b',\r\n 'Priv' : r'\\bPrivate\\b',\r\n 'RR' : r'\\bRussian\\s+Rivers?\\b|RRV',\r\n 'RRR' : r'\\bRussian\\s+Rivers?\\b|RRV',\r\n 'Res' : r'\\bReserve\\b|\\bRsv\\b|\\bResrv\\b|\\bReserv\\b|\\bReserve$',\r\n 'Rose' : r'\\bRosé|\\bROS&EACUTE;|\\bRos%E9',\r\n 'Ruth' : r'\\bRutherford\\b',\r\n 'Sandy' : r'\\bSandy',\r\n 'Samanthas' : r'\\bSamantha',\r\n 'SC' : r'\\bSanta\\s+Cruz\\b',\r\n 'SLD' : r'\\bStag.*Leap\\b',\r\n 'SLH' : r'\\bSanta\\s+Lucia\\b',\r\n 'SMV' : 
r'\\bSanta\\s+Maria|\\bS\\s+Maria',\r\n    'SRH' : r'\\bSTA\\.?|\\bSANTA\\s+Rita\\b|\\bSTA\\sRITA\\sHILLS|\\bS\\s+RITA\\b',\r\n    'SS' : r'\\bSpecial\\s+\\Selection\\b',\r\n    'Stage' : r'\\bStagecoach\\b',\r\n    'Son' : r'\\bSonoma\\b',\r\n    'SYV' : r'\\bSanta\\s+Ynez\\s+Valley\\b',\r\n    'TD9' : r'\\bTD\\s+9\\b|\\bTD-9\\b',\r\n    'Terraces' : r'\\bTerrace',\r\n    'TheCutrer' : r'\\bThe Cutrer\\b|nnay Cutrer\\b',\r\n    'Tok' : r'\\bTo[\\s\\-]?Kolan|\\bTo[\\s\\-]?Kalon',\r\n    'Turn4' : r'\\bTurn\\s+4\\b',\r\n    'Vernas' : r'\\bVerna',\r\n    'Vine' : r'\\bVines\\b',\r\n    'Yount' : r'\\bYountville\\b',\r\n    'ZThree' : r'\\bZ.*\\bThree\\b',\r\n    'ZCuvee' : r'\\bZ.*\\bCuvee\\b|\\bCuvee Z\\b', \r\n\r\n    # misspellings\r\n    'Agustina' : r'\\bAugustina\\b',\r\n    'Durell' : r'\\bDurrell\\b',\r\n    'Benchland' : r'\\bBenchlands\\b',\r\n    'Pritchard' : r'\\bPitchard\\b',\r\n}\r\n\r\n# regex search - set the ships as\r\nreShipsAs = re.compile(r'\\(ships?\\s', re.IGNORECASE)\r\n\r\n# the order in which we pull multiple single match attributes \r\ndefaultorderlist=[['Tok'], ['Oak'], ['Res'], ['RR'], ['Landslide'], ['Yount'], ['RRR'], ['Son'], ['Ruth'], ['Napa'], ['Helena'], ['SRH'], ['SLH'], ['SMV'], ['SLD'], ['Paso'], ['Alex'], ['Single'], ['Estate']]\r\n \r\n### FUNCTIONS ############################################\r\n\r\n#########################################################################################\r\ndef globalVariableCheck( debug=False ):\r\n    # check for liquor definitions that are in noGrapeLookup\r\n    # these will never execute\r\n    for liquor in liquorLookup:\r\n        if liquor in noGrapeLookup:\r\n            print('WARNING:liquorLookup regexs will never execute - they are in noGrapeLookup:', liquor)\r\n        if liquor in ignoreGrapeLookup:\r\n            print('WARNING:liquorLookup regexs will never execute - they are in ignoreGrapeLookup:', liquor)\r\n    for winery in ignoreGrapeLookup:\r\n        if winery in noGrapeLookup:\r\n            print('WARNING:ignoreGrapeLookup regexs will never execute - they are in noGrapeLookup:', winery)\r\n    \r\n#########################################################################################\r\ndef setOptionDictMasterFldValues( optiondict, debug=False ):\r\n    # default these fields to the fld values if they are not set\r\n    # otherwise leave them alone\r\n    for fld in ('fldWine', 'fldWineDescr'):\r\n        if not optiondict[fld+'Master']:\r\n            optiondict[fld+'Master'] = optiondict[fld]\r\n    \r\n\r\n#########################################################################################\r\n# having a list of names to look at and match on - see if this record has a match\r\n# nameLookup - list of names could have 'None' as the last value, or just the value of None\r\n# lookupStr - string to be searched\r\n# other - array of strings that will have the matching name removed from\r\n# msg - string defining who called this function\r\n#\r\n# returns:  string - if a matching string is found\r\n#           None - did not find a match\r\n#           '' - valid match with \"None\"\r\n#\r\ndef wineLookupByName( nameLookup, lookupStr, other, msg, wineAbbrLookup=None, debug=False ):\r\n\r\n    # string for debugging messages\r\n    funcname = 'wineLookupByName:' + msg + ':'\r\n\r\n    # debugging\r\n    if debug: print(funcname + 'nameLookup:', nameLookup)\r\n    \r\n    # if the value for this winery is None - then there is no additional work - we are done\r\n    if nameLookup is None:\r\n        # no additional processing\r\n        # debugging\r\n        if debug: print(funcname + 'match: value is none - continue on')\r\n        # return empty string\r\n        return ''\r\n\r\n    \r\n    # there are additional lookups for 
this winery - not using grape as part of the description\r\n    # check each of the things to look up\r\n    for name in nameLookup:\r\n        # debugging\r\n        if debug: print(funcname + 'match-name:', name)\r\n        \r\n        # special processing of a lookup value of none\r\n        if name is None:\r\n            # Lookup on none - means just use what we found\r\n            # debugging\r\n            if debug: print(funcname + 'name-matched:  value is none - continue on:pass back blank')\r\n            # stop iterating on nameLookup - by returning empty string\r\n            return ''\r\n\r\n        # we have not encountered 'None' - so build the regex based on the text provided\r\n        reName = re.compile( r'\\b'+name+r'\\b', re.IGNORECASE)\r\n\r\n        # check to see if we have a match with this regex\r\n        if reName.search(lookupStr):\r\n            # we have a match - so this is the additional attribute we are looking for\r\n            # debugging\r\n            if debug: print(funcname+'name-MATCHED:', name)\r\n            # remove from other if it is in there\r\n            for val in other:\r\n                if reName.search(val):\r\n                    other.remove(val)\r\n                    # debugging\r\n                    if debug: print(funcname + 'name-remove-from-other:', val)\r\n            # stop iterating on nameLookup - return what we found\r\n            return name\r\n\r\n        # 2nd check - see if we have a translation and this name is translatable\r\n        if wineAbbrLookup and name in wineAbbrLookup:\r\n            # build the regex with the look up value\r\n            reName = re.compile(wineAbbrLookup[name], re.IGNORECASE)\r\n            # debugging\r\n            if debug: print(funcname + 'Abbr-match-name:', name)\r\n            # check to see if we have a match with this regex\r\n            if reName.search(lookupStr):\r\n                # we have a match - so this is the additional attribute we are looking for\r\n                # debugging\r\n                if debug: print(funcname+'Abbr-name-MATCHED:', wineAbbrLookup[name])\r\n                # remove from other if it is in there\r\n                for val in other:\r\n                    if reName.search(val):\r\n                        other.remove(val)\r\n                        # debugging\r\n                        if debug: print(funcname + 'name-remove-from-other:', val)\r\n                # stop iterating on nameLookup - return what we found\r\n                return name\r\n\r\n    # checked all the nameLookup values - and did not find any matches\r\n    # debugging\r\n    if debug: print(funcname + 'name match not found:set to blank')\r\n    # return none meaning we did not find a match\r\n    return None\r\n\r\n\r\n#########################################################################################\r\n# find the qualifier like gift, etch, glass tied to this string\r\n#\r\n# \r\n#\r\n# returns:  first qualifier or None\r\n#\r\ndef findQualifier( wine, debug=False ):\r\n    for (val, reSearch) in reQualLookup:\r\n        if reSearch.search(wine):\r\n            if debug: print('findQualifier:matched-returning:', val)\r\n            return val\r\n\r\n    if debug: print('findQualifier:no-match-returning:', None)\r\n    return None\r\n\r\n\r\n#########################################################################################\r\n# find the winery tied to the rec\r\n#\r\n# Global Variable Used:  wineryLookup (an array of regex that define the winery)\r\n#\r\n# returns:  (winery, reWinery)\r\n#\r\ndef findWinery( rec, lastWinery, lastReWinery, fldWine, debug=False ):\r\n    # if we had a prior winery - test for this match first\r\n    if lastWinery:\r\n        # debugging\r\n        if debug:\r\n            try:\r\n                print('fw:new winery:', rec[fldWine])\r\n            except Exception as e:\r\n                print('debug error8-continuing:', str(e))\r\n                print('rec[fldWine]:type:', type(rec[fldWine]))\r\n            # print('fw:new winery:', rec[fldWine].decode('windows-1252'))\r\n            print('fw:checking if this is lastWinery:', lastWinery)\r\n        \r\n        # check to see if the winery is a match again for this record\r\n        if 
lastReWinery.search(rec[fldWine]):\r\n            # debugging\r\n            if debug: print('fw:this matches the last winery')\r\n            # match again - return values\r\n            return(lastWinery, lastReWinery)\r\n        else:\r\n            # not match - debugging\r\n            if debug: print('fw:not last winery')\r\n\r\n    # if we did not match lastWinery - let's look through the list\r\n    # go through the list of wineries (global variable),\r\n    # each row contains wineryName, wineryRegex\r\n    # pulling out the tuple from the lookup\r\n    for (winery, reWinery) in wineryLookup:\r\n        # debugging\r\n        if debug: print('fw:not lastWinery-checking winery:', winery)\r\n\r\n        if fldWine not in rec:\r\n            print('not a column in this record fldWine:', fldWine)\r\n            print('rec:', rec)\r\n        \r\n        # check to see if this winery is a match\r\n        if reWinery.search(rec[fldWine]):\r\n            # debugging\r\n            if debug: print('fw:winery match found:', winery)\r\n            # this is a match - set the variables\r\n            return (winery, reWinery)\r\n\r\n    # for loop ends without a match\r\n    # did not find a matching winery in the for loop - clear values\r\n    return (None, None)\r\n\r\n#########################################################################################\r\n# find the liquor tied to the rec, leveraging the winery\r\n# Global Variable Used:  liquorLookup\r\n#\r\n# returns:  (liquor, reLiquor)\r\n#\r\ndef findLiquor( rec, winery, fldWine, debug=False ):\r\n\r\n    # go through the list of liquors (global variable), pulling out the tuple from the lookup\r\n    for (liquor, reLiquor) in liquorLookup[winery]:\r\n        # debugging\r\n        if debug: print('fl:checking liquor:', liquor)\r\n\r\n        # check to see if this liquor is a match\r\n        if reLiquor.search(rec[fldWine]):\r\n            # debugging\r\n            if debug: print('fl:liquor match found:', liquor)\r\n            # this is a match - set the variables\r\n            return (liquor, reLiquor)\r\n\r\n    # for loop ends without a match\r\n    # did not find a matching liquor in the for loop - clear values\r\n    return (None, None)\r\n\r\n#########################################################################################\r\n# find the grape tied to the rec by regex evaluation\r\n#\r\n# Global Variable Used:  grapeLookup\r\n#\r\n# returns:  (grape, reGrape)\r\n#\r\ndef findGrapeByRegex( rec, fldWine, debug=False ):\r\n\r\n    # go through the list of grapes (global variable), pulling out the tuple from the lookup\r\n    for (grape, reGrape) in grapeLookup:\r\n        # debugging\r\n        if debug: print('fgbr:grape:', grape)\r\n\r\n        # check to see if this grape is a match\r\n        if grape is not None and reGrape.search(rec[fldWine]):\r\n            # debugging\r\n            if debug: print('fgbr:grape match found:', grape)\r\n            # this is a match - set the variables\r\n            return (grape, reGrape)\r\n\r\n    # for loop ends without a match\r\n    # did not find a matching grape in the for loop - clear values\r\n    return (None, None)\r\n\r\n#########################################################################################\r\n# find a string in a field of a record using string match and \r\n# on match, return that it matched and the remainder of the string as an array\r\n#\r\n# returns:  (findStr, other)\r\n#\r\ndef findStrInRecReturnOther( rec, fldWineDescr, findStr, debug=False ):\r\n    # find where in the string this findStr is positioned\r\n    matchLoc = rec[fldWineDescr].find(findStr)\r\n    # if we found a location\r\n    if matchLoc > -1:\r\n        # then strip everything to the left of the findStr value and then split this to create other attributes\r\n        other = rec[fldWineDescr][matchLoc+len(findStr)+1:].split()\r\n        \r\n        # debugging\r\n        if debug: 
print('fsirro:findStr matched:', findStr)\r\n if debug: print('fsirro:findStr other:', other)\r\n \r\n # return what we found\r\n return (findStr, other)\r\n \r\n #no match found - debugging\r\n if debug: print('fsirro:findStr did not match using:', findStr)\r\n # did not find a matching findStr - return that fact\r\n return (None, [])\r\n \r\n#########################################################################################\r\n# find the grape tied to the rec and the list of other attributes\r\n# to the right of the grape in that description\r\n#\r\n# Global Variable Used: grapeLookup\r\n#\r\n# returns: (grape, other)\r\n#\r\ndef findGrapeByStr( rec, fldWineDescr, debug=False ):\r\n # find the grape and strip everything right of that from the fldWineDescr field\r\n for (grape,reGrape) in grapeLookup:\r\n # debugging\r\n if debug: print('fg:grape:', grape)\r\n\r\n # find where in the string this grape is positioned\r\n (grape, other) = findStrInRecReturnOther( rec, fldWineDescr, grape, debug=debug)\r\n\r\n # if we have a match return that match\r\n if grape:\r\n return (grape, other)\r\n \r\n # did not find a matching grape - return that fact\r\n return (None, [])\r\n \r\n#########################################################################################\r\n# find the vintage tied to the rec\r\n#\r\n# Global Variable Used: vintageLookup\r\n#\r\n# returns: vintage\r\n#\r\ndef findVintage( rec, fldWine, debug=False ):\r\n # loop through the vintage lookup records\r\n for reVintage in vintageLookup:\r\n # search for match\r\n m = reVintage.search(rec[fldWine])\r\n # if there is a match\r\n if m:\r\n # extract the vlaue from the first regex group with a value\r\n if m.group(1):\r\n vintage = m.group(1)\r\n if debug: print('fv:vintage-match:', reVintage,':group1')\r\n elif m.group(2):\r\n vintage = m.group(2)\r\n if debug: print('fv:vintage-match:', reVintage,':group2')\r\n elif m.group(3):\r\n vintage = m.group(3)\r\n if debug: print('fv:vintage-match:', reVintage,':group3')\r\n else:\r\n vintage = m.group(4)\r\n if debug: print('fv:vintage-match:', reVintage,':group4')\r\n # return what we vound\r\n return vintage\r\n\r\n # did not find it\r\n return None\r\n \r\n#########################################################################################\r\n# Create the winery/grape-wine-liquour conversion table based on the\r\n# array of records passed in\r\n#\r\n# this routine takes the already read in list of definitions and parses them up\r\n# in order to create a winery-wine-attributes file - that will be used\r\n# later to take new records from searching the internet and properly assign\r\n# an aligned/consistent wine description to that wine string\r\n#\r\n# we expect the wines array to have attributes: fldWineDescr (winedescr), and fldWine (wine_name)\r\n#\r\n# returns: wgLookup - dictionary - which is built from parsing winedescr NOT wine_name\r\n#\r\n# wgLookup[winery][grape] = list of lists of attributes to perform lookups with\r\n#\r\ndef buildWineryGrapeLookup( wines, fldWineDescr='winedescr', fldWine='wine', debug=False ):\r\n\r\n # local variables\r\n wgLookup = {}\r\n lastWinery = None\r\n lastReWinery = None\r\n\r\n\r\n # step through the records read in\r\n for rec in wines:\r\n # debugging\r\n if debug: print('bwgl:new rec:', rec[fldWineDescr])\r\n\r\n # set the variable\r\n if not fldWineDescr in rec:\r\n print('creating-field:', fldWineDescr)\r\n rec[fldWineDescr] = ''\r\n \r\n # local loop variables\r\n winery = grape = wine = liquor = None\r\n other = []\r\n 
\r\n ### WINERY\r\n (lastWinery, lastReWinery) = (winery, reWinery) = findWinery( rec, lastWinery, lastReWinery, fldWine, debug=debug )\r\n \r\n # if we did not find the winery - skipt this record\r\n if not winery:\r\n # debugging\r\n if debug: print('bwgl:did not find winery-skipping:', rec[fldWine])\r\n # don't process this record - get the next record to process\r\n continue\r\n\r\n ### IGNOREGRAPE and NOGRAPE and LIQUOR\r\n\r\n # if this winery has a noGrapeLookup option - use that to split up the record\r\n if winery in ignoreGrapeLookup:\r\n ### BLANK WINE\r\n \r\n # don't get the grape for this winery\r\n # set wine to blank\r\n wine = ''\r\n # debugging\r\n if debug: print('bwgl:wine check ignoreGrapeLookup on winery:', winery)\r\n elif winery in noGrapeLookup:\r\n ### NO GRAPE WINE -- fldWineDescr\r\n \r\n # debugging\r\n if debug: print('bwgl:wine check noGrapeLookup on winery:', winery)\r\n \r\n # find which wine is a match from the noGrapeLookup\r\n wine = wineLookupByName( noGrapeLookup[winery], rec[fldWineDescr], [], 'noGrapeLookup', debug=debug )\r\n\r\n # not getting a match - we want to continue to have the wine as blank\r\n if False and wine == '':\r\n # debugging\r\n if debug: print('bwgl:nograpelookup:no-match:set wine to None')\r\n wine = None\r\n elif winery in liquorLookup:\r\n ### LIQUOR ---- fldWine\r\n # debugging\r\n if debug: print('bwgl:liquor check on winery:', winery)\r\n # see if a liquor matches\r\n (liquor, reLiquor) = findLiquor( rec, winery, fldWine, debug=debug )\r\n # if we found match - populate wine so we don't look for grape\r\n if liquor is not None:\r\n wine = liquor\r\n # debugging\r\n if debug: print('bwgl:liquor found and put in wine:', wine)\r\n\r\n \r\n ### GRAPE (if we have not filled in wine) --- fldWineDescr\r\n if wine is None:\r\n # debugging\r\n if debug: print('bwgl:grape check because wine is None')\r\n # determine if there is a grape in this string\r\n # if ther\r\n (grape,other) = findGrapeByStr( rec, fldWineDescr )\r\n # debugging\r\n if debug: print('bwgl:grape:', grape, ':other:', other)\r\n else:\r\n # debugging\r\n if debug: print('bwgl:grape check skipped - we have a wine')\r\n\r\n ### Skip this record if we don't have a wine or a grape\r\n if wine is None and grape is None:\r\n # debugging\r\n if debug: print('bwgl:record skipped - no grape or wine defined')\r\n continue\r\n\r\n ### OTHER (if not already created by grape lookup) ---- fldWineDescr\r\n #\r\n # if we did not find the grape in the string\r\n # so other was not populated\r\n # we need to look up other using 'winery' as the filter\r\n if grape is None:\r\n # debugging\r\n if debug: print('bwgl:build other from winery')\r\n # find where in the string this grape is positioned\r\n (wineryFind, other) = findStrInRecReturnOther( rec, fldWineDescr, winery, debug=debug)\r\n\r\n \r\n ### OTHER Additional Processing\r\n\r\n # remove CASE - the keyword case if it exists\r\n if 'case' in other:\r\n other.remove('case')\r\n # debugging\r\n if debug: print('bwgl:remove case from other')\r\n \r\n # remove VINTAGE and/or BOTTLESIZE and/or other QUALIFIERS\r\n # the last element will either be the vintage (no bottle size)\r\n # or will be the bottle size and then next is the vintage\r\n # if the last position is not vintage, attempt to remove the bottle size\r\n # then remove vintage - this should be the vintage (validated by isdigit lookup)\r\n if other:\r\n if debug: print('bwgl:looking at other for quals, bottlesize and vintage')\r\n # remove qualifiers if exist\r\n if not 
other[-1].isdigit():\r\n # first we check to see if there is a qualifier appended\r\n # we are not vintage as the position posiition - see if it is size\r\n for qual,reQual in reQualLookup:\r\n if qual == other[-1]:\r\n if debug: print('bwgl:remove qualifier from other:', qual)\r\n del other[-1]\r\n break\r\n \r\n # remove bottle size if exist\r\n if other and not other[-1].isdigit():\r\n # we are not vintage as the position posiition - see if it is size\r\n for size,reSize in sizeLookup:\r\n if size == other[-1]:\r\n if debug: print('bwgl:remove bottlesize from other:', size)\r\n del other[-1]\r\n break\r\n\r\n # remove vintage if it is there\r\n if other and other[-1].isdigit():\r\n # first check to see if this is part of the ignore grape solution\r\n if winery in ignoreGrapeLookup and ignoreGrapeLookup[winery]and other[-1] in ignoreGrapeLookup[winery]:\r\n if debug: print('bwgl:value is in ignoreLookupGrape - keeping it:', other[-1])\r\n else:\r\n # debugging\r\n if debug: print('bwgl:remove vintage from other:', other[-1])\r\n del other[-1]\r\n\r\n # remove WINE - the element if the element is the same as the wine\r\n if wine and wine in other:\r\n other.remove(wine)\r\n # debugging\r\n if debug: print('bwgl:remove wine from other:', wine)\r\n\r\n # debugging\r\n if debug:\r\n try:\r\n print('bwgl:Final-Build:', winery, ':', grape, ':', wine, ':', liquor, ':', other, ':', rec[fldWineDescr], ':', rec[fldWine])\r\n except Exception as e:\r\n print('debug error2-continuing:', str(e))\r\n print('fldWine:', fldWine)\r\n\r\n ### BUILD LOOKUP FOR CONVERSION (we use the grape attribute to build the dictionary)\r\n\r\n # move liquor value into grape because we did not find the\r\n if grape is None and wine is not None:\r\n grape = wine\r\n # debugging\r\n if debug: print('bwgl:set-grape-to-wine:', grape)\r\n \r\n\r\n ### WINERY:GRAPE-WINE-LIQOUR Dictionary creation\r\n\r\n # debugging\r\n if debug: print('bwgl:create wgLookup for winery:', winery, ':grape:', grape)\r\n \r\n # validate we have an entry for this winery in the lookup dict\r\n if winery not in wgLookup:\r\n # one does not create - so create a stub for winery:grape\r\n wgLookup[winery] = { grape : [] }\r\n else:\r\n # one DOES exist - check to see if the grape is already here\r\n if grape not in wgLookup[winery]:\r\n # grape is not here - so create an empty list to stuff values into\r\n wgLookup[winery][grape] = []\r\n\r\n # check to see if we have OTHER attributes\r\n # and if we do - check to see that this list of attributes\r\n # is not already in the wineLookup array\r\n # and if this list does not exist - then append this list\r\n if other and other not in wgLookup[winery][grape]:\r\n # add this list of other to this entry\r\n wgLookup[winery][grape].append(other)\r\n # debugging\r\n if debug: print('bwgl:appending to wgLookup:other:', other)\r\n \r\n # end loop on wines\r\n\r\n ### SORTED WINERY:GRAPE lookup - most optional attributes first in the list\r\n\r\n # debbuging\r\n if debug: print('bwgl:complete-read-of-master-file:sort wgLookup')\r\n\r\n # now sort the list of lookups from most specific (greatest number of attributes) to least\r\n for winery in wgLookup:\r\n for grape in wgLookup[winery]:\r\n wgLookup[winery][grape] = sorted(wgLookup[winery][grape], key=len, reverse=True)\r\n \r\n\r\n # debugging\r\n if debug:\r\n print('\\n'*5)\r\n print('START WGLOOKUP DUMPED')\r\n print('#'*80)\r\n if ppFlag:\r\n pp.pprint(wgLookup)\r\n else:\r\n print('bwgl:final-wgLookup:\\n', wgLookup)\r\n print('#'*80)\r\n\r\n \r\n # done 
with for loop - return the lookup\r\n return wgLookup\r\n\r\n#########################################################################################\r\n# find the matching set of additional attributes that match this record\r\n# from the global lookup.\r\n#\r\n# we assume that we have already tested that winery and value exist in wgLookup prior to calling this routine\r\n#\r\n# the special paramaters here are:\r\n# value - this is either \"wine\" or \"grape\" - this routine allows you to lookup on different attributes\r\n# valueDescr - passed in string for debugging telling us which value was passed in\r\n#\r\n# defaultorderlist = array of array of string - gives the default order of singlematch looks to determine which of\r\n# many matches is the one we will select\r\n#\r\n# Global Variable Used: wgLookup\r\n#\r\n# returns: valuematchset array selected\r\n#\r\ndef findAddAttribWgLookup( rec, winery, value, fldWine, AbbrLookup=[], defaultorderlist=None, valueDescr='', debug=False ):\r\n\r\n # local variable - capture all the entries that are single match entries\r\n singlematch=[]\r\n\r\n # debugging\r\n if debug:\r\n try:\r\n print('faawl:value:', valueDescr, ':match-wgLookup:', rec[fldWine], ':', wgLookup[winery][value])\r\n except Exception as e:\r\n print('debug error7-continuing:', str(e))\r\n print('fldWine:', fldWine)\r\n\r\n # for each set of values that could be a match\r\n for valuematchset in wgLookup[winery][value]:\r\n # debugging\r\n if debug: print('faawl:testing valuematchset:', valuematchset, ':length:', len(valuematchset))\r\n # set the flag to start\r\n allmatch = True\r\n # loop through the set of values that make up this set\r\n for valuematch in valuematchset:\r\n # for each entry - build a regex and test it and add it up\r\n # we need all values in this valueset to be true for this valueset to be match\r\n reMatch1 = re.compile(r'\\b'+valuematch+r'\\b', re.IGNORECASE)\r\n reMatch2 = re.compile(r'\\s'+valuematch+r'\\s', re.IGNORECASE)\r\n # check to see if this regex is a match\r\n m1 = reMatch1.search(rec[fldWine])\r\n m2 = reMatch2.search(rec[fldWine])\r\n if m1 or m2:\r\n # this regex is a match\r\n allmatch = True and allmatch\r\n elif valuematch in AbbrLookup:\r\n # this regex was not a match - but we want to check if the value also has\r\n # a translation - and if it has a translation - then we test the translation also\r\n # the value did not work but there is an alternate value to check\r\n # debugging\r\n if debug: print('faawl:valuematch-abbr:', valuematch, ':', wineAbbrLookup[valuematch])\r\n # create the regex\r\n reMatch = re.compile(wineAbbrLookup[valuematch], re.IGNORECASE)\r\n # test the regex and attach the results to allmatch\r\n allmatch = reMatch.search(rec[fldWine]) and allmatch\r\n else:\r\n # not a match - update allmatch\r\n allmatch = False and allmatch\r\n \r\n # debugging\r\n if debug: print('faawl:valuematch:', valuematch, ':allmatch:', allmatch)\r\n\r\n # check to see if all matched\r\n if allmatch:\r\n # all matched - so this is a match - so break out of the valuematchset group\r\n # debugging\r\n if debug: print('faawl:value matched:', valuematchset)\r\n # different action based on # of items being match\r\n if len(valuematchset) == 1:\r\n # debugging\r\n if debug: print('faawl:single-valuematch-set-added-to-singlematch:', valuematchset)\r\n # single value matching - we don't stop when we find a match\r\n singlematch.append(valuematchset)\r\n else:\r\n # debugging\r\n if debug: print('faawl:multivalue-valuematch-set-found:done')\r\n # 
multi value match so we are done when we find a match - so return\r\n return valuematchset\r\n\r\n # did not find matchset in the for loop - check to see if we have singlematch\r\n if not singlematch:\r\n # debugging\r\n if debug: print('faawl:exit with singlematch NOT populated return blank')\r\n # did not have singlematch found - we are done - return empty\r\n return []\r\n \r\n\r\n # singlematch populated\r\n # debugging\r\n if debug: print('faawl:exit with singlematch populated:', singlematch)\r\n # check to see how many matches we got\r\n if len(singlematch) == 1 or not defaultorderlist:\r\n # debugging\r\n if debug: print('faawl:return first entry in singlematch:', singlematch[0])\r\n # if there is only one entry in here\r\n # or we don't have a default order so we pick the first found\r\n # and we set the value to this\r\n return singlematch[0]\r\n\r\n # we need to define which of the singlematch values we will return\r\n # the defaultorderlist will be used to set that ordering\r\n #\r\n # create a local copy of the list that can be changed in this routine\r\n defaultorder = defaultorderlist[:]\r\n \r\n # multiple singlematch values so lets find and pick the best one\r\n # debugging\r\n if debug: print('faawl:multiple single match value-singlematch:', singlematch)\r\n\r\n\r\n # get the values from singlematch that are not in defaultorder\r\n # and put them at the start of defaultorder list\r\n # go in reverse order when doing this lookup\r\n for val in singlematch[::-1]:\r\n if val not in defaultorder:\r\n defaultorder.insert(0,val)\r\n \r\n ### HARDCODED ###\r\n # very short term fix - we need to prioritze these single tags (mondavi problem)\r\n if winery == 'Mondavi' and ['Tok'] in singlematch:\r\n if debug: print('faawl:Change from:', valuematchset, ':to Tok for mondavi')\r\n return ['Tok']\r\n\r\n # find the first matching value from priority order list\r\n for val in defaultorder:\r\n if val in singlematch:\r\n # debugging\r\n if debug: print('faawl:selected-singlematch-value:', val)\r\n # we found the first match - set it and break out\r\n return val\r\n\r\n # debugging\r\n if debug: print('faawl:valuematchset-empty')\r\n\r\n # did not match - return empty\r\n return []\r\n\r\n\r\n\r\n#########################################################################################\r\n# create a consistent wine name for a list or records with store based wine descriptions\r\n#\r\n# the special paramaters here are:\r\n# wgLookup - dictionary of winery, wine, list of wines\r\n# wines - list of records to be processed\r\n#\r\n# Global Variable Used: ignoreGrapeLookup, noGrapeLookup, wineAbbrLookup, liquorLookup\r\n# reCase, sizeLookup\r\n#\r\n# returns: [updated values in teh wines array]\r\n#\r\n#### Use the winery/grape-wine-liquour conversion table to define a wine description for the records\r\ndef setWineryDescrFromWineryGrapeLookup( wgLookup, wines, fldWineDescr = 'winedescr', fldWine = 'wine', fldWineDescrNew = 'winedescrnew', fldWineDescrMatch=False, debug=False ):\r\n\r\n if debug:\r\n print('\\n'*10,'START WINEDESCR SETTING HERE ---------------------------------------------')\r\n \r\n # step through all the records passed in\r\n for rec in wines:\r\n\r\n # local variables\r\n winery = grape = wine = vintage = case = size = liquor = nongrape = qual = None\r\n winematchset = grapematchset = []\r\n \r\n # debugging\r\n if debug:\r\n try:\r\n print('setWinery:fldWine:', rec[fldWine])\r\n except Exception as e:\r\n print('debug error2-continuing:', str(e))\r\n print('fldWine:', 
fldWine)\r\n \r\n # make the field if it does not exist\r\n if fldWineDescrNew not in rec:\r\n rec[fldWineDescrNew] = rec[fldWineDescr]\r\n \r\n ### WINERY\r\n (winery, reWinery) = findWinery( rec, None, None, fldWine, debug=debug )\r\n \r\n # validate the winery\r\n if winery is None:\r\n ### WINERY NONE - go to next record\r\n # debugging\r\n if debug: print('setWinery:winery not found-next record:' + rec[fldWine])\r\n # get the next record\r\n continue\r\n elif winery not in wgLookup:\r\n ### WINERY NOT IN LOOKUP\r\n # skip this record - nothing to process\r\n # debugging\r\n if debug: print('setWinery:winery not in wgLookup:', winery)\r\n continue\r\n\r\n ### GRAPE\r\n # find the grape that is this record\r\n (grape, reGrape) = findGrapeByRegex( rec, fldWine, debug=debug )\r\n\r\n # debugging\r\n if debug: print('setWinery:grape found:', grape)\r\n \r\n ### OVERRIDES\r\n if winery in ignoreGrapeLookup:\r\n ### IGNORE GRAPE\r\n \r\n # debugging\r\n if debug: print('setWinery:winery-match-ignoreGrape:clear-wine:set-grape-to-None:set-nongrape-True:winery:', winery)\r\n \r\n # clear wine and grape\r\n wine = ''\r\n\r\n # clear the grape field\r\n grape = None\r\n \r\n # set the liquor flag to control processing\r\n nongrape = True\r\n \r\n if winery in noGrapeLookup:\r\n ### NOGRAPE - WINE\r\n\r\n # debugging\r\n if debug: print('setWinery:noGrapeLookup wine check:', winery)\r\n\r\n # do the lookup and if a search is a match on None take appropriate action\r\n wine = wineLookupByName( noGrapeLookup[winery], rec[fldWine], [], 'noGrapeLookup', wineAbbrLookup, debug=debug )\r\n\r\n # debugging\r\n if debug: print('setWinery:nogrape check:wine:', wine)\r\n \r\n # test the value we got back\r\n if wine == '':\r\n # debugging\r\n if debug: print('setWinery:noGrapeLookup:matched:None::clear grape:set nongrape to True')\r\n # the lookup match None - so we want to ignore any grape found and we blank out the wine\r\n grape = None\r\n wine = ''\r\n nongrape = True\r\n elif wine:\r\n # matched a wine - so clear the grape value\r\n grape = None\r\n # debugging\r\n if debug: print('setWinery:nograpeLookup:wine found - clear grape field') \r\n\r\n if wine is None and winery in liquorLookup:\r\n ### LIQUOR\r\n # debugging\r\n if debug: print('setWinery:liqourLookup:', winery)\r\n\r\n (liquor, reLiquor) = findLiquor( rec, winery, fldWine, debug=debug)\r\n # if we found something update wine to be what we found\r\n if liquor is not None:\r\n wine = liquor\r\n # debugging\r\n if debug: print('setWinery:liquorLookup-match:', liquor)\r\n\r\n if not grape and not nongrape and not wine and liquor is None:\r\n # NO GRAPE - and not connected to noGrapeLookup or liquorLookkup\r\n # get the next record\r\n # debugging\r\n if debug: print('setWinery:did not find grape-skipping record:', rec[fldWineDescr])\r\n continue\r\n\r\n # debugging\r\n if debug: print('setWinery:pre-vintage found values for wine/liquor:', wine, ':grape:', grape)\r\n \r\n ### VINTAGE\r\n vintage = findVintage( rec, fldWine, debug=debug )\r\n\r\n # debugging\r\n if debug: print('setWinery:vintage:', vintage)\r\n \r\n ### CASE information\r\n if reCase.search(rec[fldWine]):\r\n case = 'case'\r\n \r\n ### BOTTLE SIZE - get the size information\r\n for (size, reSize) in sizeLookup:\r\n # debugging\r\n if debug: print('setWinery:sizeLookup:',size)\r\n if reSize.search(rec[fldWine]) and not reShipsAs.search(rec[fldWine]):\r\n # debugging\r\n if debug: print('setWinery:sizeLookup:matched:',reSize)\r\n break\r\n else:\r\n size = None\r\n if debug: 
print('setWinery:sizeLookup:None-found')\r\n\r\n ### QUAL for this wine\r\n qual = findQualifier(rec[fldWine], debug=debug)\r\n\r\n # debugging\r\n if debug:\r\n try:\r\n print('setWinery:FinalAttributes:', winery, ':', grape, ':', wine, ':', liquor, ':', vintage, ':', case, ':', size, ':', qual, ':', rec[fldWine])\r\n except Exception as e:\r\n print('debug error5-continuing:', str(e))\r\n print('fldWine:', fldWine)\r\n \r\n\r\n ### WINE - ADDITIONAL INFORMATION\r\n if liquor is not None:\r\n # debugging\r\n if debug: print('setWinery:liquor flag set - no additional data needs to be collected')\r\n elif wine is not None:\r\n\r\n # debugging\r\n if debug: print('setWinery:wine is not None - do additional lookups:wine:', wine) \r\n \r\n # we found a wine / liquor - so see if there are additional attributes\r\n if wine in wgLookup[winery] and wgLookup[winery][wine]:\r\n # debugging\r\n if debug: print('setWinery:lookup winematchset')\r\n # there is one or more additional lookups for this winery/wine\r\n winematchset = findAddAttribWgLookup( rec, winery, wine, fldWine, wineAbbrLookup, None, valueDescr='wine', debug=debug )\r\n else:\r\n # wine not in wgLookup so thing to work\r\n print('setWinery:unable to perform wgLookup on winery:', winery, ':wine:', wine, ':rec-wine:', rec[fldWine])\r\n # debugging\r\n if debug:\r\n try:\r\n print('wgLookup[winery]:', wgLookup[winery])\r\n except Exception as e:\r\n print('debug error3-continuing:', str(e))\r\n print('winery:', winery)\r\n \r\n # debugging - wine is not None - what is the final winematchset\r\n if debug: print('setWinery:winematchset:', winematchset)\r\n elif grape is not None:\r\n # debugging\r\n if debug: print('setWinery:grape is not None - do additional lookups:', grape)\r\n\r\n # grape was returned (not wine) so do the lookup on grape\r\n if grape in wgLookup[winery] and wgLookup[winery][grape]:\r\n # see if we can create a match based on attributes and the grape\r\n grapematchset = findAddAttribWgLookup( rec, winery, grape, fldWine, wineAbbrLookup, defaultorderlist, valueDescr='grape', debug=debug )\r\n\r\n elif grape in wgLookup[winery]:\r\n # do nothing this is a empty set\r\n if debug: print('setWinery:grape match: matching record set is blank - no action required')\r\n else:\r\n # wine not in wgLookup so thing to work\r\n # debugging\r\n print('setWinery:grape NONMATCH:', rec[fldWine])\r\n if debug: print('setWinery:liquor:', liquor, ':wine:', wine, ':grape:', grape, ':wgLookup[winery]:', wgLookup[winery])\r\n\r\n # debugging - wine is not None - what is the final grapematchset\r\n if debug: print('setWinery:grapematchset:', grapematchset)\r\n\r\n ### check the matchsets we got back - if any of them look like vintage values\r\n ### remove them from the string and look at up vintage again\r\n if vintage:\r\n newVintageLookupWine = rec[fldWine]\r\n for matchvalue in winematchset:\r\n if vintage in matchvalue:\r\n newVintageLookupWine = newVintageLookupWine.replace(matchvalue,'')\r\n if debug: print('setWinery:2nd-vintage:winematchset:wine-name-removal:', matchvalue)\r\n for matchvalue in grapematchset:\r\n if vintage in matchvalue:\r\n newVintageLookupWine = newVintageLookupWine.replace(matchvalue,'')\r\n if debug: print('setWinery:2nd-vintage:grapematchset:wine-name-removal:', matchvalue)\r\n if newVintageLookupWine != rec[fldWine]:\r\n if debug: print('setWinery:2nd-vintage:newVintageLookupWine:', newVintageLookupWine)\r\n newVintage = findVintage( { fldWine : newVintageLookupWine}, fldWine, debug=debug )\r\n if debug: 
print('setWinery:2nd-vintage:newVintage:', newVintage)\r\n vintage = newVintage\r\n\r\n ### FINAL WINEDESCR\r\n\r\n # create initial value\r\n wineDescr = ''\r\n\r\n\r\n # if winery starts with a z then we don't have a vintage\r\n if winery.startswith('z'):\r\n vintage = None\r\n # debugging\r\n if debug: print('setWinery:winery starts with z: clear vintage')\r\n\r\n # quick test - does the wine and the winematchset the same\r\n if winematchset and ' '.join(winematchset) in wine:\r\n #debugging\r\n if debug: print('setWinery:clearing-winematchset:', winematchset,':is-in-wine:', wine)\r\n winematchset = []\r\n if grapematchset and ' '.join(grapematchset) in grape:\r\n #TODO - work around for single letter matches\r\n if not (len(grapematchset)==1 and len(grapematchset[0])==1):\r\n #debugging\r\n if debug: print('setWinery:clearing-grapematchset:',grapematchset,':is-in-grape:', grape)\r\n grapematchset = []\r\n if grapematchset and size and size in ' '.join(grapematchset):\r\n size = ''\r\n if winematchset and size and size in ' '.join(winematchset):\r\n size = ''\r\n\r\n if debug:\r\n print('setWinery:vallist1:', [winery, grape, wine] + grapematchset + winematchset + [vintage, size, qual, case])\r\n print('setWinery:vallist2:', [winery, grape, wine, *grapematchset, *winematchset, vintage, size, qual, case])\r\n \r\n # create a list\r\n wdList= []\r\n # step through the values\r\n for val in [winery, grape, wine] + grapematchset + winematchset + [vintage, size, qual, case]:\r\n # and if there is a value add to the list - otherwise skip\r\n if val: wdList.append(val)\r\n\r\n # build the wine description by joining all these values together\r\n wineDescr = ' '.join(wdList)\r\n\r\n # debugging\r\n if False:\r\n if debug: print('setWinery:wdList:', wdList)\r\n if debug: print('setWinery:wineDescr:', wineDescr)\r\n \r\n # debugging\r\n if debug:\r\n try:\r\n print(':'.join(['setWinery:wineDescrList', wineDescr, rec[fldWineDescr], str(wineDescr==rec[fldWineDescr]), rec[fldWine]]) )\r\n except Exception as e:\r\n print('debug error6-continuing:', str(e))\r\n print('fldWine:', fldWine)\r\n\r\n # fill thew new value into the array\r\n rec[fldWineDescrNew] = wineDescr\r\n\r\n # fill in the matching field\r\n if fldWineDescrMatch:\r\n rec[fldWineDescrMatch] = (rec[fldWineDescr] == rec[fldWineDescrNew])\r\n \r\n\r\n#########################################################################################\r\n# set any digit only field to the word passed \r\ndef setDigitFld2Value( wines, fld, value, debug=False ):\r\n for rec in wines:\r\n if rec[fld].isdigit():\r\n rec[fld] = value\r\n\r\n#########################################################################################\r\n# validate the field settings match the file we read in for update\r\ndef updateFileOptionDictCheck( optiondict, wines, header, debug=False ):\r\n # check to see if the description field is in the file we read in\r\n if optiondict['fldWineDescr'] not in wines[0]:\r\n if debug: print('updateFileOptionDictCheck:fldWineDescr NOT in file read in:', optiondict['fldWineDescr'])\r\n # field needed is not in the record - see if we know what to do\r\n if 'cnt' in wines[0]:\r\n # the cnt field is in the file - so set to that structure\r\n # we will put the updated values into the 'cnt' field\r\n print('setting values fldWineDescr and fldWineDescrNew to: cnt')\r\n # change the field we are updating\r\n optiondict['fldWineDescr'] = optiondict['fldWineDescrNew'] = 'cnt'\r\n elif 'winedescr' in wines[0]:\r\n # the WineDescr field is in the 
file - so set to that structure\r\n print('setting values fldWineDescr to winedescr and fldWineDescrNew to winedescrnew')\r\n # change the field we are updating\r\n optiondict['fldWineDescr'] = 'winedescr'\r\n optiondict['fldWineDescrNew'] = 'winedescrnew'\r\n else:\r\n # no idea - we need to error out\r\n print('could not find fldWineDescr in wines[0]-aborting:', optiondict['fldWineDescr'], '\\nwines[0]:', wines[0])\r\n # force the error\r\n error = wines[0][optiondict['fldWineDescr']]\r\n\r\n # determine if we should create the match column (may want ot remove this section later)\r\n # removed this logic - require the person to set this field - we will not set it for them.\r\n if False and optiondict['fldWineDescr'] == 'winedescr':\r\n # we are using the file format that is the xref file\r\n # so check to see if we have match enabled\r\n if not optiondict['fldWineDescrMatch']:\r\n # create the default value\r\n optiondict['fldWineDescrMatch'] = 'same'\r\n # provide message\r\n print('setting value fldWineDescrMatch to: same')\r\n\r\n # check to see if the input file is the same as the output file\r\n if optiondict['csvfile_update_in'] == optiondict['csvfile_update_out']:\r\n # they are the same file (in and out) - so we need to move the input file to a backup location\r\n (file_path, base_filename, file_ext) = kvutil.filename_split(optiondict['csvfile_update_in'])\r\n # create the new filename\r\n backupfile = kvutil.filename_proper( base_filename + optiondict['backupfile_ext'], file_path )\r\n # messaging\r\n print('copying ', optiondict['csvfile_update_in'], ' to ', backupfile)\r\n # copy the input file to the backup filename\r\n shutil.copyfile(optiondict['csvfile_update_in'], backupfile)\r\n\r\n # set the output keys we are going to assign\r\n if optiondict['fldWineDescrNew'] == 'cnt':\r\n # output matches the original ref file format with the \"cnt\" field\r\n optiondict['csvdictkeys'] = ['cnt','date','search','store','wine','winesrt']\r\n elif optiondict['fldWineDescrMatch']:\r\n # output is a modified xref format so you can look at old and new definitions\r\n# optiondict['csvdictkeys'] = [optiondict['fldWineDescr'],optiondict['fldWineDescrNew'],optiondict['fldWineDescrMatch'], 'date','search','company','wine','winesrt']\r\n optiondict['csvdictkeys'] = [optiondict['fldWineDescr'],optiondict['fldWineDescrNew'],optiondict['fldWineDescrMatch'], *header]\r\n else:\r\n # copy over the read in format\r\n optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew']] + header[1:]\r\n # output matches expected input - should really change this to be the format of the read in file\r\n #optiondict['csvdictkeys'] = [optiondict['fldWineDescrNew'], 'date','search','company','wine','winesrt']\r\n\r\n print('updateFileOptionDictCheck:set csvdictkeys to:',optiondict['csvdictkeys'])\r\n\r\n \r\n# ---------------------------------------------------------------------------\r\nif __name__ == '__main__':\r\n\r\n # capture the command line\r\n optiondict = kvutil.kv_parse_command_line( optiondictconfig, debug=False )\r\n\r\n # set the global debug flag\r\n ppFlag = optiondict['pprint']\r\n\r\n # set master fields\r\n setOptionDictMasterFldValues( optiondict, debug=False )\r\n \r\n ### global variable checks ###\r\n if optiondict['setup_check']:\r\n print('Running global variable check')\r\n globalVariableCheck( debug = optiondict['debug'] )\r\n sys.exit()\r\n \r\n # messaging\r\n print('reading in master file:', optiondict['csvfile_master_in'])\r\n\r\n # read in the MASTER FILE INPUT file\r\n wines,header 
= kvcsv.readcsv2list_with_header(optiondict['csvfile_master_in'], headerlc=True)\r\n\r\n # build the wine lookup dictionary\r\n wgLookup = buildWineryGrapeLookup( wines, optiondict['fldWineDescrMaster'], optiondict['fldWineMaster'], debug=optiondict['debug'] )\r\n\r\n # read in the UPDATE FILE INPUT file - if not updating the master file\r\n if optiondict['csvfile_master_in'] != optiondict['csvfile_update_in']:\r\n # messaging\r\n print('reading in update file:', optiondict['csvfile_update_in'])\r\n # read in the INPUT file\r\n wines,header = kvcsv.readcsv2list_with_header(optiondict['csvfile_update_in'], headerlc=True)\r\n # check to see if we read in any records and if not just return\r\n if not wines:\r\n print('wineset.py - no records read in - no work to be done - exitting')\r\n sys.exit()\r\n \r\n\r\n # test to see if we should set the fields based on what we just read in\r\n updateFileOptionDictCheck( optiondict, wines, header, debug=optiondict['debug'] )\r\n\r\n\r\n # do the assignment of wines to records\r\n setWineryDescrFromWineryGrapeLookup( wgLookup, wines, optiondict['fldWineDescr'], optiondict['fldWine'], optiondict['fldWineDescrNew'], optiondict['fldWineDescrMatch'], debug=optiondict['debug'] )\r\n\r\n # if enabled - set all unassigned new descriptions the default value\r\n if optiondict['defaultnew'] is not None:\r\n # message\r\n print('Setting ', optiondict['fldWineDescrNew'], ' to ', optiondict['defaultnew'], 'if not set')\r\n # do the work\r\n setDigitFld2Value( wines, optiondict['fldWineDescrNew'], optiondict['defaultnew'], debug=optiondict['debug'] )\r\n\r\n # save the output to the file of interest\r\n kvcsv.writelist2csv( optiondict['csvfile_update_out'], wines, optiondict['csvdictkeys'] )\r\n\r\n # messaging\r\n print('Saved results to:', optiondict['csvfile_update_out'])\r\n\r\n",
"step-ids": [
7,
13,
15,
18,
19
]
}
|
[
7,
13,
15,
18,
19
] |
# -*- coding:utf-8 -*-
# pylint: disable=line-too-long
_BASE_REPRESENTATIONS = [
"Primitive(field='f1', op='eq', value='value')",
"Primitive(field='f1', op='eq', value=42)",
"Primitive(field='f1', op='eq', value=3.14)",
"Primitive(field='f1', op='eq', value=True)",
"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])",
"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])",
"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])",
"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])",
"Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])",
]
REPRESENTATIONS = _BASE_REPRESENTATIONS + [
val.replace(
"field='f1', op='eq", "field='f1', op='gt"
).replace(
"field='f2', op='eq'", "field='f2', op='match'"
)
for val in _BASE_REPRESENTATIONS
]
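
# Illustrative usage sketch (an editorial addition, not part of the original
# module): demonstrates that REPRESENTATIONS doubles the base list by swapping
# 'eq' for 'gt' on f1 and 'eq' for 'match' on f2. The __main__ guard and the
# printed index are assumptions for demonstration only.
if __name__ == "__main__":
    assert len(REPRESENTATIONS) == 2 * len(_BASE_REPRESENTATIONS)
    # The first derived entry is the 'gt' variant of the first base entry:
    print(REPRESENTATIONS[len(_BASE_REPRESENTATIONS)])
    # -> Primitive(field='f1', op='gt', value='value')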
|
normal
|
{
"blob_id": "137842d50355563b2df6c2fc48864c01a22afa80",
"index": 5567,
"step-1": "<mask token>\n",
"step-2": "_BASE_REPRESENTATIONS = [\"Primitive(field='f1', op='eq', value='value')\",\n \"Primitive(field='f1', op='eq', value=42)\",\n \"Primitive(field='f1', op='eq', value=3.14)\",\n \"Primitive(field='f1', op='eq', value=True)\",\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\"\n ,\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\"\n ,\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\"\n ,\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\"\n ,\n \"Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])\"\n ]\nREPRESENTATIONS = _BASE_REPRESENTATIONS + [val.replace(\"field='f1', op='eq\",\n \"field='f1', op='gt\").replace(\"field='f2', op='eq'\",\n \"field='f2', op='match'\") for val in _BASE_REPRESENTATIONS]\n",
"step-3": "# -*- coding:utf-8 -*-\n# pylint: disable=line-too-long\n\n_BASE_REPRESENTATIONS = [\n \"Primitive(field='f1', op='eq', value='value')\",\n \"Primitive(field='f1', op='eq', value=42)\",\n \"Primitive(field='f1', op='eq', value=3.14)\",\n \"Primitive(field='f1', op='eq', value=True)\",\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\",\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\",\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\",\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\",\n \"Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])\",\n]\n\nREPRESENTATIONS = _BASE_REPRESENTATIONS + [\n val.replace(\n \"field='f1', op='eq\", \"field='f1', op='gt\"\n ).replace(\n \"field='f2', op='eq'\", \"field='f2', op='match'\"\n )\n for val in _BASE_REPRESENTATIONS\n]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<mask token>
class YieldPeriodicCallback(object):
    <mask token>
def __init__(self, callback, callback_time, io_loop=None, faststart=False):
"""Init method it can be used like tornado periodic callback, but it has
extra paramtetr
:param faststart: if true callback will be run after application start
"""
self.callback = callback
if callback_time <= 0:
raise ValueError(
'Periodic callback must have a positive callback_time')
self.callback_time = callback_time
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._running = False
self._timeout = None
if faststart:
self._running = True
self._next_timeout = self.io_loop.time()
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
def start(self):
"""Starts the timer"""
if self._running:
return
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer"""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
    <mask token>
def _schedule_next(self):
"""Schedule next callback method"""
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
<|reserved_special_token_1|>
<mask token>
class YieldPeriodicCallback(object):
    <mask token>
def __init__(self, callback, callback_time, io_loop=None, faststart=False):
"""Init method it can be used like tornado periodic callback, but it has
extra paramtetr
:param faststart: if true callback will be run after application start
"""
self.callback = callback
if callback_time <= 0:
raise ValueError(
'Periodic callback must have a positive callback_time')
self.callback_time = callback_time
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._running = False
self._timeout = None
if faststart:
self._running = True
self._next_timeout = self.io_loop.time()
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
def start(self):
"""Starts the timer"""
if self._running:
return
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer"""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
@tornado.gen.coroutine
def _run(self):
"""Run the run method and schedule next time"""
if not self._running:
return
try:
yield self.callback()
except Exception:
logging.error('Error in periodic callback', exc_info=True)
self._schedule_next()
def _schedule_next(self):
"""Schedule next callback method"""
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
<|reserved_special_token_1|>
<mask token>
class YieldPeriodicCallback(object):
"""Class for better periodic call"""
def __init__(self, callback, callback_time, io_loop=None, faststart=False):
"""Init method it can be used like tornado periodic callback, but it has
extra paramtetr
:param faststart: if true callback will be run after application start
"""
self.callback = callback
if callback_time <= 0:
raise ValueError(
'Periodic callback must have a positive callback_time')
self.callback_time = callback_time
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._running = False
self._timeout = None
if faststart:
self._running = True
self._next_timeout = self.io_loop.time()
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
def start(self):
"""Starts the timer"""
if self._running:
return
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer"""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
@tornado.gen.coroutine
def _run(self):
"""Run the run method and schedule next time"""
if not self._running:
return
try:
yield self.callback()
except Exception:
logging.error('Error in periodic callback', exc_info=True)
self._schedule_next()
def _schedule_next(self):
"""Schedule next callback method"""
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
<|reserved_special_token_1|>
<mask token>
import tornado
import tornado.gen
import logging
class YieldPeriodicCallback(object):
"""Class for better periodic call"""
def __init__(self, callback, callback_time, io_loop=None, faststart=False):
"""Init method it can be used like tornado periodic callback, but it has
extra paramtetr
:param faststart: if true callback will be run after application start
"""
self.callback = callback
if callback_time <= 0:
raise ValueError(
'Periodic callback must have a positive callback_time')
self.callback_time = callback_time
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._running = False
self._timeout = None
if faststart:
self._running = True
self._next_timeout = self.io_loop.time()
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
def start(self):
"""Starts the timer"""
if self._running:
return
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer"""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
@tornado.gen.coroutine
def _run(self):
"""Run the run method and schedule next time"""
if not self._running:
return
try:
yield self.callback()
except Exception:
logging.error('Error in periodic callback', exc_info=True)
self._schedule_next()
def _schedule_next(self):
"""Schedule next callback method"""
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout,
self._run)
<|reserved_special_token_1|>
"""Class for better periodic call handling"""
import tornado
import tornado.gen
import logging
class YieldPeriodicCallback(object):
"""Class for better periodic call"""
def __init__(self, callback, callback_time, io_loop=None, faststart=False):
"""Init method it can be used like tornado periodic callback, but it has
extra paramtetr
:param faststart: if true callback will be run after application start
"""
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._running = False
self._timeout = None
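        # faststart schedules the first run at construction time, so the
        # caller does not need to invoke start() separately.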
if faststart:
self._running = True
self._next_timeout = self.io_loop.time()
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
def start(self):
"""Starts the timer"""
if self._running:
return
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer"""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
@tornado.gen.coroutine
def _run(self):
"""Run the run method and schedule next time"""
if not self._running:
return
try:
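            # The yield means the next timeout is scheduled only after the
            # coroutine finishes, so invocations never overlap even when the
            # callback takes longer than callback_time.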
yield self.callback()
except Exception: # pylint: disable=W0703
logging.error("Error in periodic callback", exc_info=True)
self._schedule_next()
def _schedule_next(self):
"""Schedule next callback method"""
if self._running:
current_time = self.io_loop.time()
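            # Catch-up behavior: if the callback overran one or more periods,
            # advance _next_timeout in whole callback_time steps so missed
            # slots are skipped rather than fired in a burst.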
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
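
# Minimal usage sketch (an editorial addition; the coroutine body and the 1s
# interval are assumptions, not part of the original class). The callback must
# be yieldable -- e.g. a tornado.gen coroutine -- because _run does
# `yield self.callback()`.
import tornado.ioloop


@tornado.gen.coroutine
def heartbeat():
    """Example periodic coroutine: logs a tick, then yields to the IOLoop."""
    logging.info("tick")
    yield tornado.gen.moment


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # faststart=True schedules the first run immediately, so start() is not needed.
    pcb = YieldPeriodicCallback(heartbeat, 1000, faststart=True)
    tornado.ioloop.IOLoop.current().start()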
|
flexible
|
{
"blob_id": "7726f8cc9adf15823cccdaa4ba316800bb134460",
"index": 1920,
"step-1": "<mask token>\n\n\nclass YieldPeriodicCallback(object):\n <mask token>\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n <mask token>\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-2": "<mask token>\n\n\nclass YieldPeriodicCallback(object):\n <mask token>\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception:\n logging.error('Error in periodic callback', exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-3": "<mask token>\n\n\nclass YieldPeriodicCallback(object):\n \"\"\"Class for better periodic call\"\"\"\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception:\n logging.error('Error in periodic callback', exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-4": "<mask token>\nimport tornado\nimport tornado.gen\nimport logging\n\n\nclass YieldPeriodicCallback(object):\n \"\"\"Class for better periodic call\"\"\"\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception:\n logging.error('Error in periodic callback', exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-5": "\"\"\"Class for better periodic call handling\"\"\"\nimport tornado\nimport tornado.gen\nimport logging\n\nclass YieldPeriodicCallback(object):\n \"\"\"Class for better periodic call\"\"\"\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\"Periodic callback must have a positive callback_time\")\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception: # pylint: disable=W0703\n logging.error(\"Error in periodic callback\", exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)\n\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
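A minimal usage sketch for the YieldPeriodicCallback record above (not part of the dataset): the poll coroutine, the 1000 ms interval, and faststart=True are illustrative assumptions.

import tornado.gen
import tornado.ioloop

@tornado.gen.coroutine
def poll():
    # Hypothetical periodic task; a real callback would typically yield on I/O here.
    print('tick')

loop = tornado.ioloop.IOLoop.current()
ticker = YieldPeriodicCallback(poll, 1000, io_loop=loop, faststart=True)
ticker.start()
loop.start()  # blocks; call ticker.stop() to cancel the pending timeout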
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(HuyenQuan)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import HuyenQuan
admin.site.register(HuyenQuan)
<|reserved_special_token_1|>
from django.contrib import admin
# Register your models here.
from .models import HuyenQuan
admin.site.register(HuyenQuan)
|
flexible
|
{
"blob_id": "16e5a44cb4fbe71eaa9c1f5b00505578de0d2cea",
"index": 6403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(HuyenQuan)\n",
"step-3": "from django.contrib import admin\nfrom .models import HuyenQuan\nadmin.site.register(HuyenQuan)\n",
"step-4": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import HuyenQuan\n\nadmin.site.register(HuyenQuan)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
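A hedged extension of the registration above: pairing the model with a hypothetical ModelAdmin subclass. The list_display column is an assumption, since the HuyenQuan fields are not shown in this record.

from django.contrib import admin
from .models import HuyenQuan

class HuyenQuanAdmin(admin.ModelAdmin):
    # 'id' is the only field guaranteed to exist; replace with real columns.
    list_display = ('id',)

admin.site.register(HuyenQuan, HuyenQuanAdmin)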
<|reserved_special_token_0|>
class BRFCustomerBuilder(BRFBaseBuilder):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BRFCustomerBuilder(BRFBaseBuilder):
def define_translator(self) ->None:
self._translator = BRFCustomerTranslator()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BRFCustomerBuilder(BRFBaseBuilder):
def define_translator(self) ->None:
self._translator = BRFCustomerTranslator()
def build_adapter(self) ->None:
self._adapter = CustomerAdapter(client=self._client, translator=
self._translator)
<|reserved_special_token_1|>
from menu_sun_integration.application.adapters.customer_adapter import CustomerAdapter
from menu_sun_integration.infrastructure.brf.builders.brf_base_builder import BRFBaseBuilder
from menu_sun_integration.infrastructure.brf.translators.brf_customer_translator import BRFCustomerTranslator
class BRFCustomerBuilder(BRFBaseBuilder):
def define_translator(self) ->None:
self._translator = BRFCustomerTranslator()
def build_adapter(self) ->None:
self._adapter = CustomerAdapter(client=self._client, translator=
self._translator)
|
flexible
|
{
"blob_id": "8020bac94de3e68193c9891a628a48c537c5afa0",
"index": 9069,
"step-1": "<mask token>\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n\n def define_translator(self) ->None:\n self._translator = BRFCustomerTranslator()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n\n def define_translator(self) ->None:\n self._translator = BRFCustomerTranslator()\n\n def build_adapter(self) ->None:\n self._adapter = CustomerAdapter(client=self._client, translator=\n self._translator)\n",
"step-4": "from menu_sun_integration.application.adapters.customer_adapter import CustomerAdapter\nfrom menu_sun_integration.infrastructure.brf.builders.brf_base_builder import BRFBaseBuilder\nfrom menu_sun_integration.infrastructure.brf.translators.brf_customer_translator import BRFCustomerTranslator\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n\n def define_translator(self) ->None:\n self._translator = BRFCustomerTranslator()\n\n def build_adapter(self) ->None:\n self._adapter = CustomerAdapter(client=self._client, translator=\n self._translator)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Test_PlaceModel(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_var_initialization(self):
self.assertTrue(hasattr(self.model, 'city_id'))
self.assertTrue(hasattr(self.model, 'user_id'))
self.assertTrue(hasattr(self.model, 'name'))
self.assertTrue(hasattr(self.model, 'description'))
self.assertTrue(hasattr(self.model, 'number_rooms'))
self.assertTrue(hasattr(self.model, 'number_bathrooms'))
self.assertTrue(hasattr(self.model, 'max_guest'))
self.assertTrue(hasattr(self.model, 'price_by_night'))
self.assertTrue(hasattr(self.model, 'latitude'))
self.assertTrue(hasattr(self.model, 'longitude'))
self.assertTrue(hasattr(self.model, 'amenities'))
self.assertEqual(self.model.city_id, '')
self.assertEqual(self.model.user_id, '')
self.assertEqual(self.model.name, '')
self.assertEqual(self.model.description, '')
self.assertEqual(self.model.number_rooms, 0)
self.assertEqual(self.model.number_bathrooms, 0)
self.assertEqual(self.model.max_guest, 0)
self.assertEqual(self.model.price_by_night, 0)
self.assertEqual(self.model.latitude, 0.0)
self.assertEqual(self.model.longitude, 0.0)
self.assertEqual(self.model.amenities, [''])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_PlaceModel(unittest.TestCase):
"""
Test the place model class
"""
def setUp(self):
self.model = Place()
self.model.save()
def test_var_initialization(self):
self.assertTrue(hasattr(self.model, 'city_id'))
self.assertTrue(hasattr(self.model, 'user_id'))
self.assertTrue(hasattr(self.model, 'name'))
self.assertTrue(hasattr(self.model, 'description'))
self.assertTrue(hasattr(self.model, 'number_rooms'))
self.assertTrue(hasattr(self.model, 'number_bathrooms'))
self.assertTrue(hasattr(self.model, 'max_guest'))
self.assertTrue(hasattr(self.model, 'price_by_night'))
self.assertTrue(hasattr(self.model, 'latitude'))
self.assertTrue(hasattr(self.model, 'longitude'))
self.assertTrue(hasattr(self.model, 'amenities'))
self.assertEqual(self.model.city_id, '')
self.assertEqual(self.model.user_id, '')
self.assertEqual(self.model.name, '')
self.assertEqual(self.model.description, '')
self.assertEqual(self.model.number_rooms, 0)
self.assertEqual(self.model.number_bathrooms, 0)
self.assertEqual(self.model.max_guest, 0)
self.assertEqual(self.model.price_by_night, 0)
self.assertEqual(self.model.latitude, 0.0)
self.assertEqual(self.model.longitude, 0.0)
self.assertEqual(self.model.amenities, [''])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_PlaceModel(unittest.TestCase):
"""
Test the place model class
"""
def setUp(self):
self.model = Place()
self.model.save()
def test_var_initialization(self):
self.assertTrue(hasattr(self.model, 'city_id'))
self.assertTrue(hasattr(self.model, 'user_id'))
self.assertTrue(hasattr(self.model, 'name'))
self.assertTrue(hasattr(self.model, 'description'))
self.assertTrue(hasattr(self.model, 'number_rooms'))
self.assertTrue(hasattr(self.model, 'number_bathrooms'))
self.assertTrue(hasattr(self.model, 'max_guest'))
self.assertTrue(hasattr(self.model, 'price_by_night'))
self.assertTrue(hasattr(self.model, 'latitude'))
self.assertTrue(hasattr(self.model, 'longitude'))
self.assertTrue(hasattr(self.model, 'amenities'))
self.assertEqual(self.model.city_id, '')
self.assertEqual(self.model.user_id, '')
self.assertEqual(self.model.name, '')
self.assertEqual(self.model.description, '')
self.assertEqual(self.model.number_rooms, 0)
self.assertEqual(self.model.number_bathrooms, 0)
self.assertEqual(self.model.max_guest, 0)
self.assertEqual(self.model.price_by_night, 0)
self.assertEqual(self.model.latitude, 0.0)
self.assertEqual(self.model.longitude, 0.0)
self.assertEqual(self.model.amenities, [''])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from datetime import datetime
from models import *
class Test_PlaceModel(unittest.TestCase):
"""
Test the place model class
"""
def setUp(self):
self.model = Place()
self.model.save()
def test_var_initialization(self):
self.assertTrue(hasattr(self.model, 'city_id'))
self.assertTrue(hasattr(self.model, 'user_id'))
self.assertTrue(hasattr(self.model, 'name'))
self.assertTrue(hasattr(self.model, 'description'))
self.assertTrue(hasattr(self.model, 'number_rooms'))
self.assertTrue(hasattr(self.model, 'number_bathrooms'))
self.assertTrue(hasattr(self.model, 'max_guest'))
self.assertTrue(hasattr(self.model, 'price_by_night'))
self.assertTrue(hasattr(self.model, 'latitude'))
self.assertTrue(hasattr(self.model, 'longitude'))
self.assertTrue(hasattr(self.model, 'amenities'))
self.assertEqual(self.model.city_id, '')
self.assertEqual(self.model.user_id, '')
self.assertEqual(self.model.name, '')
self.assertEqual(self.model.description, '')
self.assertEqual(self.model.number_rooms, 0)
self.assertEqual(self.model.number_bathrooms, 0)
self.assertEqual(self.model.max_guest, 0)
self.assertEqual(self.model.price_by_night, 0)
self.assertEqual(self.model.latitude, 0.0)
self.assertEqual(self.model.longitude, 0.0)
self.assertEqual(self.model.amenities, [''])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from datetime import datetime
from models import *
class Test_PlaceModel(unittest.TestCase):
"""
Test the place model class
"""
def setUp(self):
self.model = Place()
self.model.save()
def test_var_initialization(self):
self.assertTrue(hasattr(self.model, "city_id"))
self.assertTrue(hasattr(self.model, "user_id"))
self.assertTrue(hasattr(self.model, "name"))
self.assertTrue(hasattr(self.model, "description"))
self.assertTrue(hasattr(self.model, "number_rooms"))
self.assertTrue(hasattr(self.model, "number_bathrooms"))
self.assertTrue(hasattr(self.model, "max_guest"))
self.assertTrue(hasattr(self.model, "price_by_night"))
self.assertTrue(hasattr(self.model, "latitude"))
self.assertTrue(hasattr(self.model, "longitude"))
self.assertTrue(hasattr(self.model, "amenities"))
self.assertEqual(self.model.city_id, "")
self.assertEqual(self.model.user_id, "")
self.assertEqual(self.model.name, "")
self.assertEqual(self.model.description, "")
self.assertEqual(self.model.number_rooms, 0)
self.assertEqual(self.model.number_bathrooms, 0)
self.assertEqual(self.model.max_guest, 0)
self.assertEqual(self.model.price_by_night, 0)
self.assertEqual(self.model.latitude, 0.0)
self.assertEqual(self.model.longitude, 0.0)
self.assertEqual(self.model.amenities, [''])
if __name__ == "__main__":
unittest.main()
|
flexible
|
{
"blob_id": "c7881c0d06600a43bdc01f5e464127c596db6713",
"index": 7993,
"step-1": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom datetime import datetime\nfrom models import *\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom datetime import datetime\nfrom models import *\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, \"city_id\"))\n self.assertTrue(hasattr(self.model, \"user_id\"))\n self.assertTrue(hasattr(self.model, \"name\"))\n self.assertTrue(hasattr(self.model, \"description\"))\n self.assertTrue(hasattr(self.model, \"number_rooms\"))\n self.assertTrue(hasattr(self.model, \"number_bathrooms\"))\n self.assertTrue(hasattr(self.model, \"max_guest\"))\n self.assertTrue(hasattr(self.model, \"price_by_night\"))\n self.assertTrue(hasattr(self.model, \"latitude\"))\n self.assertTrue(hasattr(self.model, \"longitude\"))\n self.assertTrue(hasattr(self.model, \"amenities\"))\n self.assertEqual(self.model.city_id, \"\")\n self.assertEqual(self.model.user_id, \"\")\n self.assertEqual(self.model.name, \"\")\n self.assertEqual(self.model.description, \"\")\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
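As a sketch, the suite in the record above can also be driven programmatically with the stdlib runner; this assumes the project's models package is importable so Place() resolves.

import unittest

suite = unittest.TestLoader().loadTestsFromTestCase(Test_PlaceModel)
unittest.TextTestRunner(verbosity=2).run(suite)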
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
im1.show()
im.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
im = Image.open('data/frame1.jpg')
left = 155
top = 65
right = 360
bottom = 270
im1 = im.crop((left, top, right, bottom))
im1.show()
im.show()
<|reserved_special_token_1|>
from PIL import Image
im = Image.open('data/frame1.jpg')
left = 155
top = 65
right = 360
bottom = 270
im1 = im.crop((left, top, right, bottom))
im1.show()
im.show()
<|reserved_special_token_1|>
# Improting Image class from PIL module
from PIL import Image
# Opens a image in RGB mode
im = Image.open("data/frame1.jpg")
# Setting the points for cropped image
left = 155
top = 65
right = 360
bottom = 270
# Cropped image of above dimension
# (It will not change orginal image)
im1 = im.crop((left, top, right, bottom))
# Shows the image in image viewer
im1.show()
im.show()
|
flexible
|
{
"blob_id": "9fd73e0a1dacc46c177f11ce4cf2351b3d622c0d",
"index": 7594,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nim1.show()\nim.show()\n",
"step-3": "<mask token>\nim = Image.open('data/frame1.jpg')\nleft = 155\ntop = 65\nright = 360\nbottom = 270\nim1 = im.crop((left, top, right, bottom))\nim1.show()\nim.show()\n",
"step-4": "from PIL import Image\nim = Image.open('data/frame1.jpg')\nleft = 155\ntop = 65\nright = 360\nbottom = 270\nim1 = im.crop((left, top, right, bottom))\nim1.show()\nim.show()\n",
"step-5": "# Improting Image class from PIL module\nfrom PIL import Image\n\n# Opens a image in RGB mode\nim = Image.open(\"data/frame1.jpg\")\n\n# Setting the points for cropped image\nleft = 155\ntop = 65\nright = 360\nbottom = 270\n\n# Cropped image of above dimension\n# (It will not change orginal image)\nim1 = im.crop((left, top, right, bottom))\n\n# Shows the image in image viewer\nim1.show()\nim.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
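A small sketch extending the crop recipe above to persist the region instead of only displaying it; the output path is an assumption.

from PIL import Image

im = Image.open('data/frame1.jpg')
box = (155, 65, 360, 270)  # (left, upper, right, lower) in pixels
im.crop(box).save('data/frame1_crop.jpg')  # hypothetical output file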
import plotly.express as px
import pandas as pd
def fiig(plan):
df = pd.DataFrame(plan)
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="РЦ", color='РЦ', facet_row_spacing=0.6,
facet_col_spacing=0.6, opacity=0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],
title='график проектов')
for i, d in enumerate(fig.data):
d.width = df[df['РЦ'] == d.name]['Вес']
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
# fig.add_vrect(x0=0.9, x1=2)
# fig.show()
def fig_porc_projects(plan):
df = pd.DataFrame(plan)
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="Проект", color='РЦ', facet_row_spacing=0.2,
facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')
# for i, d in enumerate(fig.data):
# d.width = df[df['РЦ'] == d.name]['РЦ']
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
# fig.add_vrect(x0=0.9, x1=2)
# fig.show()
def fig_podetalno_naproject_rc(plan, proj):
df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="Номер", color='РЦ', facet_row_spacing=0.2,
facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')
# for i, d in enumerate(fig.data):
# d.width = df[df['РЦ'] == d.name]['РЦ']
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
def fig_podetalno_narc_projects(plan, rc):
filtr = [_ for _ in plan if rc in _['РЦ']]
df = pd.DataFrame(filtr)
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="Номер", color='Проект', facet_row_spacing=0.2,
facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')
for i, d in enumerate(fig.data):
d.width = df[df['Проект'] == d.name]['Пост']/10 + 0.1
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
|
normal
|
{
"blob_id": "09850f0d3d295170545a6342337e97a0f190989a",
"index": 6578,
"step-1": "<mask token>\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='Проект', facet_row_spacing=0.2, facet_col_spacing=0.1,\n opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост'] / 10 + 0.1\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n",
"step-3": "<mask token>\n\n\ndef fiig(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='РЦ',\n color='РЦ', facet_row_spacing=0.6, facet_col_spacing=0.6, opacity=\n 0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],\n title='график проектов')\n for i, d in enumerate(fig.data):\n d.width = df[df['РЦ'] == d.name]['Вес']\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='Проект', facet_row_spacing=0.2, facet_col_spacing=0.1,\n opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост'] / 10 + 0.1\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n",
"step-4": "import plotly.express as px\nimport pandas as pd\n\n\ndef fiig(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='РЦ',\n color='РЦ', facet_row_spacing=0.6, facet_col_spacing=0.6, opacity=\n 0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],\n title='график проектов')\n for i, d in enumerate(fig.data):\n d.width = df[df['РЦ'] == d.name]['Вес']\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='Проект', facet_row_spacing=0.2, facet_col_spacing=0.1,\n opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост'] / 10 + 0.1\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n",
"step-5": "import plotly.express as px\nimport pandas as pd\n\n\ndef fiig(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"РЦ\", color='РЦ', facet_row_spacing=0.6,\n facet_col_spacing=0.6, opacity=0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],\n title='график проектов')\n for i, d in enumerate(fig.data):\n d.width = df[df['РЦ'] == d.name]['Вес']\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n\n\n# fig.add_vrect(x0=0.9, x1=2)\n# fig.show()\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"Проект\", color='РЦ', facet_row_spacing=0.2,\n facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n # for i, d in enumerate(fig.data):\n # d.width = df[df['РЦ'] == d.name]['РЦ']\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n\n\n# fig.add_vrect(x0=0.9, x1=2)\n# fig.show()\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"Номер\", color='РЦ', facet_row_spacing=0.2,\n facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n # for i, d in enumerate(fig.data):\n # d.width = df[df['РЦ'] == d.name]['РЦ']\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"Номер\", color='Проект', facet_row_spacing=0.2,\n facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост']/10 + 0.1\n\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
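A minimal sketch of calling fiig from the record above; the two-row plan is fabricated illustrative data whose keys (including the misspelled 'Наменование') match the columns the function expects.

plan = [
    {'Начало': '2021-03-01', 'Завершение': '2021-03-03', 'РЦ': 'Токарный',
     'Вес': 0.5, 'Проект': 'Проект A', 'МК': 'MK-1', 'Наменование': 'Вал',
     'Номер': '001', 'Минут': 120},
    {'Начало': '2021-03-03', 'Завершение': '2021-03-05', 'РЦ': 'Фрезерный',
     'Вес': 0.8, 'Проект': 'Проект A', 'МК': 'MK-2', 'Наменование': 'Корпус',
     'Номер': '002', 'Минут': 240},
]
fiig(plan).show()  # renders the Gantt-style timeline in a browser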
from django.db import models
from NavigantAnalyzer.common import convert_datetime_string
import json
# A custom view-based model for flat outputs - RÖ - 2018-10-24
# Don't add, change or delete fields without editing the view in the Db
class Results_flat(models.Model):
race_id = models.IntegerField()
race_name = models.CharField(max_length=127)
race_serie = models.CharField(max_length=127, blank=True)
race_begin = models.DateTimeField(blank=True, null=True)
result_start_time = models.DateTimeField(blank=True, null=True)
runner_last_name = models.CharField(max_length=63, blank=True)
runner_first_name = models.CharField(max_length=63, blank=True)
result_emit = models.CharField(max_length=12, blank=True)
course_name = models.CharField(max_length=63)
course_length = models.IntegerField(blank=True, null=True)
course_num_participants = models.IntegerField(blank=True, null=True)
course_min_time = models.IntegerField(blank=True, null=True)
course_mean_time = models.IntegerField(blank=True, null=True)
course_min_puistotime = models.IntegerField(blank=True, null=True)
course_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_min_time = models.IntegerField(blank=True, null=True)
visit_mean_time = models.IntegerField(blank=True, null=True)
visit_min_puistotime = models.IntegerField(blank=True, null=True)
visit_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_puistoman_time = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_min_time = models.IntegerField(blank=True, null=True)
leg_mean_time = models.IntegerField(blank=True, null=True)
leg_min_puistotime = models.IntegerField(blank=True, null=True)
leg_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_order = models.IntegerField()
visit_code = models.IntegerField()
visit_time = models.IntegerField()
visit_position = models.IntegerField(blank=True)
visit_puistoposition = models.IntegerField(blank=True)
leg_time = models.IntegerField(blank=True)
leg_position = models.IntegerField(blank=True)
leg_puistoposition = models.IntegerField(blank=True)
visit_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08
visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
leg_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08
leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
leg_puisto_success = models.FloatField(null=True) # Since 2019-12-08
result_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
result_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08
result_puisto_max_level = models.FloatField(null=True) # Since 2019-12-08
result_puisto_success = models.FloatField(null=True) # Since 2019-12-08
result_puisto_optimum = models.IntegerField(null=True) # Since 2019-12-08
result_puisto_mistakes = models.IntegerField(null=True) # Since 2019-12-08
class Meta:
managed = False
db_table = 'NavigantAnalyzer_results_flat'
def get_fields(self):
result = dict()
datetime_fields = ['race_begin', 'result_start_time']
for field in Results_flat._meta.fields:
value = field.value_to_string(self)
if value.isdigit():
value = int(value)
if field.name in datetime_fields:
value = convert_datetime_string(value)
result[field.name] = value
return json.dumps(result)
|
normal
|
{
"blob_id": "802eb0502c5eddcabd41b2d438bf53a5d6fb2c82",
"index": 8368,
"step-1": "<mask token>\n\n\nclass Results_flat(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Results_flat(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-3": "<mask token>\n\n\nclass Results_flat(models.Model):\n race_id = models.IntegerField()\n race_name = models.CharField(max_length=127)\n race_serie = models.CharField(max_length=127, blank=True)\n race_begin = models.DateTimeField(blank=True, null=True)\n result_start_time = models.DateTimeField(blank=True, null=True)\n runner_last_name = models.CharField(max_length=63, blank=True)\n runner_first_name = models.CharField(max_length=63, blank=True)\n result_emit = models.CharField(max_length=12, blank=True)\n course_name = models.CharField(max_length=63)\n course_length = models.IntegerField(blank=True, null=True)\n course_num_participants = models.IntegerField(blank=True, null=True)\n course_min_time = models.IntegerField(blank=True, null=True)\n course_mean_time = models.IntegerField(blank=True, null=True)\n course_min_puistotime = models.IntegerField(blank=True, null=True)\n course_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_min_time = models.IntegerField(blank=True, null=True)\n visit_mean_time = models.IntegerField(blank=True, null=True)\n visit_min_puistotime = models.IntegerField(blank=True, null=True)\n visit_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_puistoman_time = models.IntegerField(blank=True, null=True)\n leg_min_time = models.IntegerField(blank=True, null=True)\n leg_mean_time = models.IntegerField(blank=True, null=True)\n leg_min_puistotime = models.IntegerField(blank=True, null=True)\n leg_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_order = models.IntegerField()\n visit_code = models.IntegerField()\n visit_time = models.IntegerField()\n visit_position = models.IntegerField(blank=True)\n visit_puistoposition = models.IntegerField(blank=True)\n leg_time = models.IntegerField(blank=True)\n leg_position = models.IntegerField(blank=True)\n leg_puistoposition = models.IntegerField(blank=True)\n visit_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puistoperc_time_pm = models.FloatField(null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puisto_success = models.FloatField(null=True)\n result_puistoperc_time_l = models.FloatField(null=True)\n result_puistoperc_time_pm = models.FloatField(null=True)\n result_puisto_max_level = models.FloatField(null=True)\n result_puisto_success = models.FloatField(null=True)\n result_puisto_optimum = models.IntegerField(null=True)\n result_puisto_mistakes = models.IntegerField(null=True)\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-4": "from django.db import models\nfrom NavigantAnalyzer.common import convert_datetime_string\nimport json\n\n\nclass Results_flat(models.Model):\n race_id = models.IntegerField()\n race_name = models.CharField(max_length=127)\n race_serie = models.CharField(max_length=127, blank=True)\n race_begin = models.DateTimeField(blank=True, null=True)\n result_start_time = models.DateTimeField(blank=True, null=True)\n runner_last_name = models.CharField(max_length=63, blank=True)\n runner_first_name = models.CharField(max_length=63, blank=True)\n result_emit = models.CharField(max_length=12, blank=True)\n course_name = models.CharField(max_length=63)\n course_length = models.IntegerField(blank=True, null=True)\n course_num_participants = models.IntegerField(blank=True, null=True)\n course_min_time = models.IntegerField(blank=True, null=True)\n course_mean_time = models.IntegerField(blank=True, null=True)\n course_min_puistotime = models.IntegerField(blank=True, null=True)\n course_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_min_time = models.IntegerField(blank=True, null=True)\n visit_mean_time = models.IntegerField(blank=True, null=True)\n visit_min_puistotime = models.IntegerField(blank=True, null=True)\n visit_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_puistoman_time = models.IntegerField(blank=True, null=True)\n leg_min_time = models.IntegerField(blank=True, null=True)\n leg_mean_time = models.IntegerField(blank=True, null=True)\n leg_min_puistotime = models.IntegerField(blank=True, null=True)\n leg_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_order = models.IntegerField()\n visit_code = models.IntegerField()\n visit_time = models.IntegerField()\n visit_position = models.IntegerField(blank=True)\n visit_puistoposition = models.IntegerField(blank=True)\n leg_time = models.IntegerField(blank=True)\n leg_position = models.IntegerField(blank=True)\n leg_puistoposition = models.IntegerField(blank=True)\n visit_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puistoperc_time_pm = models.FloatField(null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puisto_success = models.FloatField(null=True)\n result_puistoperc_time_l = models.FloatField(null=True)\n result_puistoperc_time_pm = models.FloatField(null=True)\n result_puisto_max_level = models.FloatField(null=True)\n result_puisto_success = models.FloatField(null=True)\n result_puisto_optimum = models.IntegerField(null=True)\n result_puisto_mistakes = models.IntegerField(null=True)\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-5": "from django.db import models\nfrom NavigantAnalyzer.common import convert_datetime_string\nimport json\n\n# A custom view-based model for flat outputs - RÖ - 2018-10-24\n# Don't add, change or delete fields without editing the view in the Db\nclass Results_flat(models.Model):\n race_id = models.IntegerField()\n race_name = models.CharField(max_length=127)\n race_serie = models.CharField(max_length=127, blank=True)\n race_begin = models.DateTimeField(blank=True, null=True)\n result_start_time = models.DateTimeField(blank=True, null=True)\n runner_last_name = models.CharField(max_length=63, blank=True)\n runner_first_name = models.CharField(max_length=63, blank=True)\n result_emit = models.CharField(max_length=12, blank=True)\n course_name = models.CharField(max_length=63)\n course_length = models.IntegerField(blank=True, null=True)\n course_num_participants = models.IntegerField(blank=True, null=True)\n course_min_time = models.IntegerField(blank=True, null=True)\n course_mean_time = models.IntegerField(blank=True, null=True)\n course_min_puistotime = models.IntegerField(blank=True, null=True)\n course_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_min_time = models.IntegerField(blank=True, null=True)\n visit_mean_time = models.IntegerField(blank=True, null=True)\n visit_min_puistotime = models.IntegerField(blank=True, null=True)\n visit_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_puistoman_time = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_min_time = models.IntegerField(blank=True, null=True)\n leg_mean_time = models.IntegerField(blank=True, null=True)\n leg_min_puistotime = models.IntegerField(blank=True, null=True)\n leg_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_order = models.IntegerField()\n visit_code = models.IntegerField()\n visit_time = models.IntegerField()\n visit_position = models.IntegerField(blank=True)\n visit_puistoposition = models.IntegerField(blank=True)\n leg_time = models.IntegerField(blank=True)\n leg_position = models.IntegerField(blank=True)\n leg_puistoposition = models.IntegerField(blank=True)\n visit_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08\n leg_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08\n leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08\n leg_puisto_success = models.FloatField(null=True) # Since 2019-12-08\n result_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08\n result_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08\n result_puisto_max_level = models.FloatField(null=True) # Since 2019-12-08\n result_puisto_success = models.FloatField(null=True) # Since 2019-12-08\n result_puisto_optimum = models.IntegerField(null=True) # Since 2019-12-08\n result_puisto_mistakes = models.IntegerField(null=True) # Since 2019-12-08\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if 
value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
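A sketch of consuming get_fields from the model above; it assumes a configured Django project where the unmanaged database view exists, and the race_id filter value is illustrative.

import json

row = Results_flat.objects.filter(race_id=1).first()
if row is not None:
    payload = json.loads(row.get_fields())  # get_fields returns a JSON string
    print(payload['race_name'], payload['visit_time'])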
# A program to display and find the sum of a list of numbers using for loop
list=[10,20,30,40,50]
sum=0;
for i in list:
print(i)
sum=sum+i
print('sum =',sum)
|
normal
|
{
"blob_id": "88e34ee5cd5af7d3b04321c4aa4fc815f926add1",
"index": 7110,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in list:\n print(i)\n sum = sum + i\nprint('sum =', sum)\n",
"step-3": "list = [10, 20, 30, 40, 50]\nsum = 0\nfor i in list:\n print(i)\n sum = sum + i\nprint('sum =', sum)\n",
"step-4": "# A program to display and find the sum of a list of numbers using for loop\r\n\r\nlist=[10,20,30,40,50]\r\nsum=0;\r\n\r\nfor i in list:\r\n\tprint(i)\r\n\tsum=sum+i\r\nprint('sum =',sum)\t",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
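For reference, the same total comes from the built-in sum, avoiding the manual accumulator that shadows the sum name:

numbers = [10, 20, 30, 40, 50]
print('sum =', sum(numbers))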
import testTemplate
def getTests():
tests = []
suite=testTemplate.testSuite("Sample Test Cases")
testcase = testTemplate.testInstance("3\n1 1 1\n1 1 1\n1 1 1" , "6" , "Sample #1")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0" , "7588" , "Sample #2")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 " , "7426" , "Sample #3")
suite.add(testcase)
tests.append(suite)
return tests
|
normal
|
{
"blob_id": "de4c31ad474b7ce75631214aceafbe4d7334f14b",
"index": 6956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTests():\n tests = []\n suite = testTemplate.testSuite('Sample Test Cases')\n testcase = testTemplate.testInstance('3\\n1 1 1\\n1 1 1\\n1 1 1', '6',\n 'Sample #1')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0\"\"\"\n , '7588', 'Sample #2')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 \"\"\"\n , '7426', 'Sample #3')\n suite.add(testcase)\n tests.append(suite)\n return tests\n",
"step-3": "import testTemplate\n\n\ndef getTests():\n tests = []\n suite = testTemplate.testSuite('Sample Test Cases')\n testcase = testTemplate.testInstance('3\\n1 1 1\\n1 1 1\\n1 1 1', '6',\n 'Sample #1')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0\"\"\"\n , '7588', 'Sample #2')\n suite.add(testcase)\n testcase = testTemplate.testInstance(\n \"\"\"11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 \"\"\"\n , '7426', 'Sample #3')\n suite.add(testcase)\n tests.append(suite)\n return tests\n",
"step-4": "import testTemplate \ndef getTests():\n\ttests = []\n\t\n\tsuite=testTemplate.testSuite(\"Sample Test Cases\")\n\ttestcase = testTemplate.testInstance(\"3\\n1 1 1\\n1 1 1\\n1 1 1\" , \"6\" , \"Sample #1\")\n\tsuite.add(testcase)\n\ttestcase = testTemplate.testInstance(\"11\\n1 0 0 1 0 0 0 0 0 1 1 \\n1 1 1 1 1 0 1 0 1 0 0 \\n1 0 0 1 0 0 1 1 0 1 0 \\n1 0 1 1 1 0 1 1 0 1 1 \\n0 1 1 1 0 1 0 0 1 1 1 \\n1 1 1 0 0 1 0 0 0 0 0 \\n0 0 0 0 1 0 1 0 0 0 1 \\n1 0 1 1 0 0 0 0 0 0 1 \\n0 0 1 0 1 1 0 0 0 1 1 \\n1 1 1 0 0 0 1 0 1 0 1 \\n1 0 0 0 1 1 1 1 0 0 0\" , \"7588\" , \"Sample #2\")\n\tsuite.add(testcase)\n\ttestcase = testTemplate.testInstance(\"11\\n0 1 1 1 0 1 0 0 0 1 0 \\n0 0 1 1 1 1 1 1 1 1 1 \\n1 1 0 1 0 0 0 0 0 1 0 \\n0 1 0 1 0 1 0 1 0 1 1 \\n1 0 0 1 0 0 0 0 1 0 1 \\n0 0 1 0 1 1 0 0 0 0 1 \\n1 0 1 0 1 1 1 0 1 1 0 \\n1 0 1 1 0 1 1 0 0 1 0 \\n0 0 1 1 0 1 1 1 1 1 1 \\n0 1 0 0 0 0 0 0 0 1 1 \\n0 1 1 0 0 0 0 0 1 0 1 \" , \"7426\" , \"Sample #3\")\n\tsuite.add(testcase)\n\ttests.append(suite)\n\t\n\treturn tests\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MaterialSiteEditor(QObject):
<|reserved_special_token_0|>
def __init__(self, site, parent=None):
super().__init__(parent)
loader = UiLoader()
self.ui = loader.load_file('material_site_editor.ui', parent)
self._site = site
self.charge_comboboxes = []
self.occupancy_spinboxes = []
self.thermal_factor_spinboxes = []
self.update_gui()
self.setup_connections()
def setup_connections(self):
self.ui.select_atom_types.pressed.connect(self.select_atom_types)
self.ui.thermal_factor_type.currentIndexChanged.connect(self.
thermal_factor_type_changed)
for w in self.site_settings_widgets:
w.valueChanged.connect(self.update_config)
self.ui.table.selectionModel().selectionChanged.connect(self.
selection_changed)
self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)
self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)
def select_atom_types(self):
dialog = PeriodicTableDialog(self.atom_types, self.ui)
if not dialog.exec_():
return
self.atom_types = dialog.selected_atoms
@property
def site(self):
return self._site
@site.setter
def site(self, v):
self._site = v
self.update_gui()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def fractional_coords(self):
return self.site['fractional_coords']
@property
def thermal_factor_type(self):
return self.ui.thermal_factor_type.currentText()
def U(self, val):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = B_TO_U
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
<|reserved_special_token_0|>
def thermal_factor(self, atom):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = U_TO_B
else:
raise Exception(f'Unknown type: {type}')
return atom['U'] * multiplier
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def num_rows(self):
return self.ui.table.rowCount()
@property
def selected_row(self):
selected = self.ui.table.selectionModel().selectedRows()
return selected[0].row() if selected else None
def select_row(self, i):
if i is None or i >= self.num_rows:
return
selection_model = self.ui.table.selectionModel()
selection_model.clearSelection()
model_index = selection_model.model().index(i, 0)
command = QItemSelectionModel.Select | QItemSelectionModel.Rows
selection_model.select(model_index, command)
def selection_changed(self):
self.update_enable_states()
def update_enable_states(self):
enable_remove = self.num_rows > 1 and self.selected_row is not None
self.ui.remove_atom_type.setEnabled(enable_remove)
<|reserved_special_token_0|>
def create_symbol_label(self, v):
w = QTableWidgetItem(v)
return w
def create_charge_combobox(self, charge, symbol):
cb = QComboBox(self.ui.table)
if charge not in chargestate[symbol]:
raise Exception(f'Invalid charge {charge} for {symbol}')
cb.addItems(chargestate[symbol])
cb.setCurrentText(charge)
cb.currentIndexChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
cb.setSizePolicy(size_policy)
self.charge_comboboxes.append(cb)
return cb
def create_occupancy_spinbox(self, v):
sb = ScientificDoubleSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(OCCUPATION_MIN)
sb.setMaximum(OCCUPATION_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.occupancy_spinboxes.append(sb)
return sb
def create_thermal_factor_spinbox(self, v):
sb = ThermalFactorSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(THERMAL_FACTOR_MIN)
sb.setMaximum(THERMAL_FACTOR_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
sb.setToolTip('Double-click to open tensor editor')
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.thermal_factor_spinboxes.append(sb)
return sb
<|reserved_special_token_0|>
def update_gui(self):
with block_signals(*self.site_settings_widgets):
for i, w in enumerate(self.fractional_coords_widgets):
w.setValue(self.fractional_coords[i])
self.update_total_occupancy()
self.update_table()
self.reset_scalar_tensor_toggle()
def reset_scalar_tensor_toggle(self):
any_scalars = any(not isinstance(w.value(), np.ndarray) for w in
self.thermal_factor_spinboxes)
with block_signals(self.ui.convert_u_to_tensors):
self.ui.convert_u_to_tensors.setChecked(not any_scalars)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_config(self):
for i, w in enumerate(self.fractional_coords_widgets):
self.fractional_coords[i] = w.value()
for atom, combobox in zip(self.atoms, self.charge_comboboxes):
atom['charge'] = combobox.currentText()
for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
atom['occupancy'] = spinbox.value()
for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
atom['U'] = self.U(spinbox.value())
self.update_total_occupancy()
self.update_occupancy_validity()
self.emit_site_modified_if_valid()
<|reserved_special_token_0|>
def reset_occupancies(self):
total = 1.0
atoms = self.atoms
num_atoms = len(atoms)
for atom in atoms:
atom['occupancy'] = total / num_atoms
self.update_total_occupancy()
self.update_occupancy_validity()
@property
def site_valid(self):
return self.occupancies_valid
<|reserved_special_token_0|>
def update_occupancy_validity(self):
valid = self.occupancies_valid
color = 'white' if valid else 'red'
msg = '' if valid else 'Sum of occupancies must be <= 1'
self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')
self.ui.total_occupancy.setToolTip(msg)
<|reserved_special_token_0|>
@property
def fractional_coords_widgets(self):
return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]
@property
def site_settings_widgets(self):
return self.fractional_coords_widgets
<|reserved_special_token_0|>
class ThermalFactorSpinBox(ScientificDoubleSpinBox):
def __init__(self, parent=None):
super().__init__(parent)
self.editor = ThermalFactorEditor(0, parent)
self.setLineEdit(ThermalFactorLineEdit(self, self))
self.valueChanged.connect(self.update_editor_value)
def value(self):
return self.editor.value
def setValue(self, v):
self.editor.value = v
if self.editor.is_tensor:
super().setValue(super().value())
self.valueChanged.emit(super().value())
self.setReadOnly(True)
else:
super().setValue(v)
self.valueChanged.emit(v)
self.setReadOnly(False)
def update_editor_value(self):
if not self.editor.is_tensor:
self.editor.value = super().value()
def textFromValue(self, value):
if not hasattr(self, 'editor') or not self.editor.is_tensor:
return super().textFromValue(value)
return 'Tensor'
def open_editor(self):
original = copy.deepcopy(self.editor.value)
if not self.editor.exec_():
self.editor.value = original
return
self.setValue(self.editor.value)
class ThermalFactorLineEdit(QLineEdit):
def __init__(self, spinbox, parent=None):
super().__init__(parent)
self.spinbox = spinbox
def mousePressEvent(self, event):
if self.isReadOnly():
self.open_editor()
return
super().mousePressEvent(event)
def mouseDoubleClickEvent(self, event):
self.open_editor()
def open_editor(self):
self.spinbox.open_editor()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MaterialSiteEditor(QObject):
<|reserved_special_token_0|>
def __init__(self, site, parent=None):
super().__init__(parent)
loader = UiLoader()
self.ui = loader.load_file('material_site_editor.ui', parent)
self._site = site
self.charge_comboboxes = []
self.occupancy_spinboxes = []
self.thermal_factor_spinboxes = []
self.update_gui()
self.setup_connections()
def setup_connections(self):
self.ui.select_atom_types.pressed.connect(self.select_atom_types)
self.ui.thermal_factor_type.currentIndexChanged.connect(self.
thermal_factor_type_changed)
for w in self.site_settings_widgets:
w.valueChanged.connect(self.update_config)
self.ui.table.selectionModel().selectionChanged.connect(self.
selection_changed)
self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)
self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)
def select_atom_types(self):
dialog = PeriodicTableDialog(self.atom_types, self.ui)
if not dialog.exec_():
return
self.atom_types = dialog.selected_atoms
@property
def site(self):
return self._site
@site.setter
def site(self, v):
self._site = v
self.update_gui()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def fractional_coords(self):
return self.site['fractional_coords']
@property
def thermal_factor_type(self):
return self.ui.thermal_factor_type.currentText()
def U(self, val):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = B_TO_U
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
<|reserved_special_token_0|>
def thermal_factor(self, atom):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = U_TO_B
else:
raise Exception(f'Unknown type: {type}')
return atom['U'] * multiplier
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def num_rows(self):
return self.ui.table.rowCount()
@property
def selected_row(self):
selected = self.ui.table.selectionModel().selectedRows()
return selected[0].row() if selected else None
def select_row(self, i):
if i is None or i >= self.num_rows:
return
selection_model = self.ui.table.selectionModel()
selection_model.clearSelection()
model_index = selection_model.model().index(i, 0)
command = QItemSelectionModel.Select | QItemSelectionModel.Rows
selection_model.select(model_index, command)
def selection_changed(self):
self.update_enable_states()
def update_enable_states(self):
enable_remove = self.num_rows > 1 and self.selected_row is not None
self.ui.remove_atom_type.setEnabled(enable_remove)
<|reserved_special_token_0|>
def create_symbol_label(self, v):
w = QTableWidgetItem(v)
return w
def create_charge_combobox(self, charge, symbol):
cb = QComboBox(self.ui.table)
if charge not in chargestate[symbol]:
raise Exception(f'Invalid charge {charge} for {symbol}')
cb.addItems(chargestate[symbol])
cb.setCurrentText(charge)
cb.currentIndexChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
cb.setSizePolicy(size_policy)
self.charge_comboboxes.append(cb)
return cb
def create_occupancy_spinbox(self, v):
sb = ScientificDoubleSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(OCCUPATION_MIN)
sb.setMaximum(OCCUPATION_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.occupancy_spinboxes.append(sb)
return sb
def create_thermal_factor_spinbox(self, v):
sb = ThermalFactorSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(THERMAL_FACTOR_MIN)
sb.setMaximum(THERMAL_FACTOR_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
sb.setToolTip('Double-click to open tensor editor')
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.thermal_factor_spinboxes.append(sb)
return sb
<|reserved_special_token_0|>
def update_gui(self):
with block_signals(*self.site_settings_widgets):
for i, w in enumerate(self.fractional_coords_widgets):
w.setValue(self.fractional_coords[i])
self.update_total_occupancy()
self.update_table()
self.reset_scalar_tensor_toggle()
def reset_scalar_tensor_toggle(self):
any_scalars = any(not isinstance(w.value(), np.ndarray) for w in
self.thermal_factor_spinboxes)
with block_signals(self.ui.convert_u_to_tensors):
self.ui.convert_u_to_tensors.setChecked(not any_scalars)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_thermal_factor_header(self):
w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])
w.setText(self.thermal_factor_type)
def update_config(self):
for i, w in enumerate(self.fractional_coords_widgets):
self.fractional_coords[i] = w.value()
for atom, combobox in zip(self.atoms, self.charge_comboboxes):
atom['charge'] = combobox.currentText()
for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
atom['occupancy'] = spinbox.value()
for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
atom['U'] = self.U(spinbox.value())
self.update_total_occupancy()
self.update_occupancy_validity()
self.emit_site_modified_if_valid()
<|reserved_special_token_0|>
def reset_occupancies(self):
total = 1.0
atoms = self.atoms
num_atoms = len(atoms)
for atom in atoms:
atom['occupancy'] = total / num_atoms
self.update_total_occupancy()
self.update_occupancy_validity()
@property
def site_valid(self):
return self.occupancies_valid
<|reserved_special_token_0|>
def update_occupancy_validity(self):
valid = self.occupancies_valid
color = 'white' if valid else 'red'
msg = '' if valid else 'Sum of occupancies must be <= 1'
self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')
self.ui.total_occupancy.setToolTip(msg)
<|reserved_special_token_0|>
@property
def fractional_coords_widgets(self):
return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]
@property
def site_settings_widgets(self):
return self.fractional_coords_widgets
<|reserved_special_token_0|>
class ThermalFactorSpinBox(ScientificDoubleSpinBox):
def __init__(self, parent=None):
super().__init__(parent)
self.editor = ThermalFactorEditor(0, parent)
self.setLineEdit(ThermalFactorLineEdit(self, self))
self.valueChanged.connect(self.update_editor_value)
def value(self):
return self.editor.value
def setValue(self, v):
self.editor.value = v
if self.editor.is_tensor:
super().setValue(super().value())
self.valueChanged.emit(super().value())
self.setReadOnly(True)
else:
super().setValue(v)
self.valueChanged.emit(v)
self.setReadOnly(False)
def update_editor_value(self):
if not self.editor.is_tensor:
self.editor.value = super().value()
def textFromValue(self, value):
if not hasattr(self, 'editor') or not self.editor.is_tensor:
return super().textFromValue(value)
return 'Tensor'
def open_editor(self):
original = copy.deepcopy(self.editor.value)
if not self.editor.exec_():
self.editor.value = original
return
self.setValue(self.editor.value)
class ThermalFactorLineEdit(QLineEdit):
def __init__(self, spinbox, parent=None):
super().__init__(parent)
self.spinbox = spinbox
def mousePressEvent(self, event):
if self.isReadOnly():
self.open_editor()
return
super().mousePressEvent(event)
def mouseDoubleClickEvent(self, event):
self.open_editor()
def open_editor(self):
self.spinbox.open_editor()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MaterialSiteEditor(QObject):
<|reserved_special_token_0|>
def __init__(self, site, parent=None):
super().__init__(parent)
loader = UiLoader()
self.ui = loader.load_file('material_site_editor.ui', parent)
self._site = site
self.charge_comboboxes = []
self.occupancy_spinboxes = []
self.thermal_factor_spinboxes = []
self.update_gui()
self.setup_connections()
def setup_connections(self):
self.ui.select_atom_types.pressed.connect(self.select_atom_types)
self.ui.thermal_factor_type.currentIndexChanged.connect(self.
thermal_factor_type_changed)
for w in self.site_settings_widgets:
w.valueChanged.connect(self.update_config)
self.ui.table.selectionModel().selectionChanged.connect(self.
selection_changed)
self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)
self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)
def select_atom_types(self):
dialog = PeriodicTableDialog(self.atom_types, self.ui)
if not dialog.exec_():
return
self.atom_types = dialog.selected_atoms
@property
def site(self):
return self._site
@site.setter
def site(self, v):
self._site = v
self.update_gui()
@property
def atoms(self):
return self.site['atoms']
<|reserved_special_token_0|>
@property
def fractional_coords(self):
return self.site['fractional_coords']
@property
def thermal_factor_type(self):
return self.ui.thermal_factor_type.currentText()
def U(self, val):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = B_TO_U
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def B(self, val):
type = self.thermal_factor_type
if type == 'U':
multiplier = U_TO_B
elif type == 'B':
multiplier = 1
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def thermal_factor(self, atom):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = U_TO_B
else:
raise Exception(f'Unknown type: {type}')
return atom['U'] * multiplier
@property
def atom_types(self):
return [x['symbol'] for x in self.site['atoms']]
@atom_types.setter
def atom_types(self, v):
if v == self.atom_types:
return
atoms = self.atoms
previous_u_values = {x['symbol']: x['U'] for x in atoms}
previous_charges = {x['symbol']: x['charge'] for x in atoms}
atoms.clear()
for symbol in v:
atom = {'symbol': symbol, 'U': previous_u_values.get(symbol,
DEFAULT_U), 'charge': previous_charges.get(symbol,
DEFAULT_CHARGE)}
atoms.append(atom)
self.reset_occupancies()
self.update_table()
self.emit_site_modified_if_valid()
@property
def num_rows(self):
return self.ui.table.rowCount()
@property
def selected_row(self):
selected = self.ui.table.selectionModel().selectedRows()
return selected[0].row() if selected else None
def select_row(self, i):
if i is None or i >= self.num_rows:
return
selection_model = self.ui.table.selectionModel()
selection_model.clearSelection()
model_index = selection_model.model().index(i, 0)
command = QItemSelectionModel.Select | QItemSelectionModel.Rows
selection_model.select(model_index, command)
def selection_changed(self):
self.update_enable_states()
def update_enable_states(self):
enable_remove = self.num_rows > 1 and self.selected_row is not None
self.ui.remove_atom_type.setEnabled(enable_remove)
<|reserved_special_token_0|>
def create_symbol_label(self, v):
w = QTableWidgetItem(v)
return w
def create_charge_combobox(self, charge, symbol):
cb = QComboBox(self.ui.table)
if charge not in chargestate[symbol]:
raise Exception(f'Invalid charge {charge} for {symbol}')
cb.addItems(chargestate[symbol])
cb.setCurrentText(charge)
cb.currentIndexChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
cb.setSizePolicy(size_policy)
self.charge_comboboxes.append(cb)
return cb
def create_occupancy_spinbox(self, v):
sb = ScientificDoubleSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(OCCUPATION_MIN)
sb.setMaximum(OCCUPATION_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.occupancy_spinboxes.append(sb)
return sb
def create_thermal_factor_spinbox(self, v):
sb = ThermalFactorSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(THERMAL_FACTOR_MIN)
sb.setMaximum(THERMAL_FACTOR_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
sb.setToolTip('Double-click to open tensor editor')
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.thermal_factor_spinboxes.append(sb)
return sb
<|reserved_special_token_0|>
def update_gui(self):
with block_signals(*self.site_settings_widgets):
for i, w in enumerate(self.fractional_coords_widgets):
w.setValue(self.fractional_coords[i])
self.update_total_occupancy()
self.update_table()
self.reset_scalar_tensor_toggle()
def reset_scalar_tensor_toggle(self):
any_scalars = any(not isinstance(w.value(), np.ndarray) for w in
self.thermal_factor_spinboxes)
with block_signals(self.ui.convert_u_to_tensors):
self.ui.convert_u_to_tensors.setChecked(not any_scalars)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_thermal_factor_header(self):
w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])
w.setText(self.thermal_factor_type)
def update_config(self):
for i, w in enumerate(self.fractional_coords_widgets):
self.fractional_coords[i] = w.value()
for atom, combobox in zip(self.atoms, self.charge_comboboxes):
atom['charge'] = combobox.currentText()
for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
atom['occupancy'] = spinbox.value()
for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
atom['U'] = self.U(spinbox.value())
self.update_total_occupancy()
self.update_occupancy_validity()
self.emit_site_modified_if_valid()
def update_total_occupancy(self):
self.ui.total_occupancy.setValue(self.total_occupancy)
def reset_occupancies(self):
total = 1.0
atoms = self.atoms
num_atoms = len(atoms)
for atom in atoms:
atom['occupancy'] = total / num_atoms
self.update_total_occupancy()
self.update_occupancy_validity()
@property
def site_valid(self):
return self.occupancies_valid
@property
def occupancies_valid(self):
return self.total_occupancy <= 1.0
def update_occupancy_validity(self):
valid = self.occupancies_valid
color = 'white' if valid else 'red'
msg = '' if valid else 'Sum of occupancies must be <= 1'
self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')
self.ui.total_occupancy.setToolTip(msg)
<|reserved_special_token_0|>
@property
def fractional_coords_widgets(self):
return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]
@property
def site_settings_widgets(self):
return self.fractional_coords_widgets
def convert_u_to_tensors(self, b):
def scalar_to_tensor(spinbox):
if isinstance(spinbox.value(), np.ndarray):
return
tensor = np.zeros(6, dtype=np.float64)
tensor[:3] = spinbox.value()
spinbox.setValue(tensor)
def tensor_to_scalar(spinbox):
value = spinbox.value()
if not isinstance(value, np.ndarray):
return
scalar = spinbox.editor.ui.scalar_value.value()
if np.isclose(scalar, 0) and np.allclose(value[:3], value[0]
) and np.allclose(value[3:], 0):
scalar = value[0]
spinbox.setValue(scalar)
f = scalar_to_tensor if b else tensor_to_scalar
for spinbox in self.thermal_factor_spinboxes:
f(spinbox)
class ThermalFactorSpinBox(ScientificDoubleSpinBox):
def __init__(self, parent=None):
super().__init__(parent)
self.editor = ThermalFactorEditor(0, parent)
self.setLineEdit(ThermalFactorLineEdit(self, self))
self.valueChanged.connect(self.update_editor_value)
def value(self):
return self.editor.value
def setValue(self, v):
self.editor.value = v
if self.editor.is_tensor:
super().setValue(super().value())
self.valueChanged.emit(super().value())
self.setReadOnly(True)
else:
super().setValue(v)
self.valueChanged.emit(v)
self.setReadOnly(False)
def update_editor_value(self):
if not self.editor.is_tensor:
self.editor.value = super().value()
def textFromValue(self, value):
if not hasattr(self, 'editor') or not self.editor.is_tensor:
return super().textFromValue(value)
return 'Tensor'
def open_editor(self):
original = copy.deepcopy(self.editor.value)
if not self.editor.exec_():
self.editor.value = original
return
self.setValue(self.editor.value)
class ThermalFactorLineEdit(QLineEdit):
def __init__(self, spinbox, parent=None):
super().__init__(parent)
self.spinbox = spinbox
def mousePressEvent(self, event):
if self.isReadOnly():
self.open_editor()
return
super().mousePressEvent(event)
def mouseDoubleClickEvent(self, event):
self.open_editor()
def open_editor(self):
self.spinbox.open_editor()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MaterialSiteEditor(QObject):
site_modified = Signal()
def __init__(self, site, parent=None):
super().__init__(parent)
loader = UiLoader()
self.ui = loader.load_file('material_site_editor.ui', parent)
self._site = site
self.charge_comboboxes = []
self.occupancy_spinboxes = []
self.thermal_factor_spinboxes = []
self.update_gui()
self.setup_connections()
def setup_connections(self):
self.ui.select_atom_types.pressed.connect(self.select_atom_types)
self.ui.thermal_factor_type.currentIndexChanged.connect(self.
thermal_factor_type_changed)
for w in self.site_settings_widgets:
w.valueChanged.connect(self.update_config)
self.ui.table.selectionModel().selectionChanged.connect(self.
selection_changed)
self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)
self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)
def select_atom_types(self):
dialog = PeriodicTableDialog(self.atom_types, self.ui)
if not dialog.exec_():
return
self.atom_types = dialog.selected_atoms
@property
def site(self):
return self._site
@site.setter
def site(self, v):
self._site = v
self.update_gui()
@property
def atoms(self):
return self.site['atoms']
@property
def total_occupancy(self):
return sum(x['occupancy'] for x in self.atoms)
@property
def fractional_coords(self):
return self.site['fractional_coords']
@property
def thermal_factor_type(self):
return self.ui.thermal_factor_type.currentText()
def U(self, val):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = B_TO_U
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def B(self, val):
type = self.thermal_factor_type
if type == 'U':
multiplier = U_TO_B
elif type == 'B':
multiplier = 1
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def thermal_factor(self, atom):
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = U_TO_B
else:
raise Exception(f'Unknown type: {type}')
return atom['U'] * multiplier
@property
def atom_types(self):
return [x['symbol'] for x in self.site['atoms']]
@atom_types.setter
def atom_types(self, v):
if v == self.atom_types:
return
atoms = self.atoms
previous_u_values = {x['symbol']: x['U'] for x in atoms}
previous_charges = {x['symbol']: x['charge'] for x in atoms}
atoms.clear()
for symbol in v:
atom = {'symbol': symbol, 'U': previous_u_values.get(symbol,
DEFAULT_U), 'charge': previous_charges.get(symbol,
DEFAULT_CHARGE)}
atoms.append(atom)
self.reset_occupancies()
self.update_table()
self.emit_site_modified_if_valid()
@property
def num_rows(self):
return self.ui.table.rowCount()
@property
def selected_row(self):
selected = self.ui.table.selectionModel().selectedRows()
return selected[0].row() if selected else None
def select_row(self, i):
if i is None or i >= self.num_rows:
return
selection_model = self.ui.table.selectionModel()
selection_model.clearSelection()
model_index = selection_model.model().index(i, 0)
command = QItemSelectionModel.Select | QItemSelectionModel.Rows
selection_model.select(model_index, command)
def selection_changed(self):
self.update_enable_states()
def update_enable_states(self):
enable_remove = self.num_rows > 1 and self.selected_row is not None
self.ui.remove_atom_type.setEnabled(enable_remove)
def remove_selected_atom(self):
if self.selected_row is None:
return
atom_types = self.atom_types
del atom_types[self.selected_row]
self.atom_types = atom_types
def create_symbol_label(self, v):
w = QTableWidgetItem(v)
return w
def create_charge_combobox(self, charge, symbol):
cb = QComboBox(self.ui.table)
if charge not in chargestate[symbol]:
raise Exception(f'Invalid charge {charge} for {symbol}')
cb.addItems(chargestate[symbol])
cb.setCurrentText(charge)
cb.currentIndexChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
cb.setSizePolicy(size_policy)
self.charge_comboboxes.append(cb)
return cb
def create_occupancy_spinbox(self, v):
sb = ScientificDoubleSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(OCCUPATION_MIN)
sb.setMaximum(OCCUPATION_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.occupancy_spinboxes.append(sb)
return sb
def create_thermal_factor_spinbox(self, v):
sb = ThermalFactorSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(THERMAL_FACTOR_MIN)
sb.setMaximum(THERMAL_FACTOR_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
sb.setToolTip('Double-click to open tensor editor')
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.thermal_factor_spinboxes.append(sb)
return sb
def clear_table(self):
self.charge_comboboxes.clear()
self.occupancy_spinboxes.clear()
self.thermal_factor_spinboxes.clear()
self.ui.table.clearContents()
def update_gui(self):
with block_signals(*self.site_settings_widgets):
for i, w in enumerate(self.fractional_coords_widgets):
w.setValue(self.fractional_coords[i])
self.update_total_occupancy()
self.update_table()
self.reset_scalar_tensor_toggle()
def reset_scalar_tensor_toggle(self):
any_scalars = any(not isinstance(w.value(), np.ndarray) for w in
self.thermal_factor_spinboxes)
with block_signals(self.ui.convert_u_to_tensors):
self.ui.convert_u_to_tensors.setChecked(not any_scalars)
def update_table(self):
prev_selected = self.selected_row
block_list = [self.ui.table, self.ui.table.selectionModel()]
with block_signals(*block_list):
atoms = self.site['atoms']
self.clear_table()
self.ui.table.setRowCount(len(atoms))
for i, atom in enumerate(atoms):
w = self.create_symbol_label(atom['symbol'])
self.ui.table.setItem(i, COLUMNS['symbol'], w)
w = self.create_charge_combobox(atom['charge'], atom['symbol'])
self.ui.table.setCellWidget(i, COLUMNS['charge'], w)
w = self.create_occupancy_spinbox(atom['occupancy'])
self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)
v = self.thermal_factor(atom)
w = self.create_thermal_factor_spinbox(v)
self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)
self.update_occupancy_validity()
if prev_selected is not None:
select_row = (prev_selected if prev_selected < self.
num_rows else self.num_rows - 1)
self.select_row(select_row)
self.selection_changed()
def thermal_factor_type_changed(self):
self.update_thermal_factor_header()
self.update_table()
text = f'Convert {self.thermal_factor_type} to tensors'
self.ui.convert_u_to_tensors.setText(text)
def update_thermal_factor_header(self):
w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])
w.setText(self.thermal_factor_type)
def update_config(self):
for i, w in enumerate(self.fractional_coords_widgets):
self.fractional_coords[i] = w.value()
for atom, combobox in zip(self.atoms, self.charge_comboboxes):
atom['charge'] = combobox.currentText()
for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
atom['occupancy'] = spinbox.value()
for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
atom['U'] = self.U(spinbox.value())
self.update_total_occupancy()
self.update_occupancy_validity()
self.emit_site_modified_if_valid()
def update_total_occupancy(self):
self.ui.total_occupancy.setValue(self.total_occupancy)
def reset_occupancies(self):
total = 1.0
atoms = self.atoms
num_atoms = len(atoms)
for atom in atoms:
atom['occupancy'] = total / num_atoms
self.update_total_occupancy()
self.update_occupancy_validity()
@property
def site_valid(self):
return self.occupancies_valid
@property
def occupancies_valid(self):
return self.total_occupancy <= 1.0
def update_occupancy_validity(self):
valid = self.occupancies_valid
color = 'white' if valid else 'red'
msg = '' if valid else 'Sum of occupancies must be <= 1'
self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')
self.ui.total_occupancy.setToolTip(msg)
def emit_site_modified_if_valid(self):
if not self.site_valid:
return
self.site_modified.emit()
@property
def fractional_coords_widgets(self):
return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]
@property
def site_settings_widgets(self):
return self.fractional_coords_widgets
def convert_u_to_tensors(self, b):
def scalar_to_tensor(spinbox):
if isinstance(spinbox.value(), np.ndarray):
return
tensor = np.zeros(6, dtype=np.float64)
tensor[:3] = spinbox.value()
spinbox.setValue(tensor)
def tensor_to_scalar(spinbox):
value = spinbox.value()
if not isinstance(value, np.ndarray):
return
scalar = spinbox.editor.ui.scalar_value.value()
if np.isclose(scalar, 0) and np.allclose(value[:3], value[0]
) and np.allclose(value[3:], 0):
scalar = value[0]
spinbox.setValue(scalar)
f = scalar_to_tensor if b else tensor_to_scalar
for spinbox in self.thermal_factor_spinboxes:
f(spinbox)
class ThermalFactorSpinBox(ScientificDoubleSpinBox):
def __init__(self, parent=None):
super().__init__(parent)
self.editor = ThermalFactorEditor(0, parent)
self.setLineEdit(ThermalFactorLineEdit(self, self))
self.valueChanged.connect(self.update_editor_value)
def value(self):
return self.editor.value
def setValue(self, v):
self.editor.value = v
if self.editor.is_tensor:
super().setValue(super().value())
self.valueChanged.emit(super().value())
self.setReadOnly(True)
else:
super().setValue(v)
self.valueChanged.emit(v)
self.setReadOnly(False)
def update_editor_value(self):
if not self.editor.is_tensor:
self.editor.value = super().value()
def textFromValue(self, value):
if not hasattr(self, 'editor') or not self.editor.is_tensor:
return super().textFromValue(value)
return 'Tensor'
def open_editor(self):
original = copy.deepcopy(self.editor.value)
if not self.editor.exec_():
self.editor.value = original
return
self.setValue(self.editor.value)
class ThermalFactorLineEdit(QLineEdit):
def __init__(self, spinbox, parent=None):
super().__init__(parent)
self.spinbox = spinbox
def mousePressEvent(self, event):
if self.isReadOnly():
self.open_editor()
return
super().mousePressEvent(event)
def mouseDoubleClickEvent(self, event):
self.open_editor()
def open_editor(self):
self.spinbox.open_editor()
<|reserved_special_token_1|>
import copy
import numpy as np
from PySide2.QtCore import QItemSelectionModel, QObject, Signal
from PySide2.QtWidgets import (
QComboBox, QLineEdit, QSizePolicy, QTableWidgetItem
)
from hexrd.constants import chargestate
from hexrd.material import Material
from hexrd.ui.periodic_table_dialog import PeriodicTableDialog
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
from hexrd.ui.thermal_factor_editor import ThermalFactorEditor
from hexrd.ui.ui_loader import UiLoader
from hexrd.ui.utils import block_signals
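# Editor for a single material site: fractional-coordinate spin boxes plus a
# table of atoms (symbol, charge, occupancy, thermal factor) kept in sync
# with the underlying `site` dict.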
COLUMNS = {
'symbol': 0,
'charge': 1,
'occupancy': 2,
'thermal_factor': 3
}
DEFAULT_CHARGE = '0'
DEFAULT_U = Material.DFLT_U[0]
OCCUPATION_MIN = 0
OCCUPATION_MAX = 1
THERMAL_FACTOR_MIN = -1.e7
THERMAL_FACTOR_MAX = 1.e7
U_TO_B = 8 * np.pi ** 2
B_TO_U = 1 / U_TO_B
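# Conversion between the two common Debye-Waller conventions:
# B = 8 * pi**2 * U (both in squared Angstroms), so U_TO_B maps U -> B
# and B_TO_U is its inverse.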
class MaterialSiteEditor(QObject):
site_modified = Signal()
def __init__(self, site, parent=None):
super().__init__(parent)
loader = UiLoader()
self.ui = loader.load_file('material_site_editor.ui', parent)
self._site = site
self.charge_comboboxes = []
self.occupancy_spinboxes = []
self.thermal_factor_spinboxes = []
self.update_gui()
self.setup_connections()
def setup_connections(self):
self.ui.select_atom_types.pressed.connect(self.select_atom_types)
self.ui.thermal_factor_type.currentIndexChanged.connect(
self.thermal_factor_type_changed)
for w in self.site_settings_widgets:
w.valueChanged.connect(self.update_config)
self.ui.table.selectionModel().selectionChanged.connect(
self.selection_changed)
self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)
self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)
def select_atom_types(self):
dialog = PeriodicTableDialog(self.atom_types, self.ui)
if not dialog.exec_():
return
self.atom_types = dialog.selected_atoms
@property
def site(self):
return self._site
@site.setter
def site(self, v):
self._site = v
self.update_gui()
@property
def atoms(self):
return self.site['atoms']
@property
def total_occupancy(self):
return sum(x['occupancy'] for x in self.atoms)
@property
def fractional_coords(self):
return self.site['fractional_coords']
@property
def thermal_factor_type(self):
return self.ui.thermal_factor_type.currentText()
def U(self, val):
# Take a thermal factor from a spin box and convert it to U
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = B_TO_U
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def B(self, val):
# Take a thermal factor from a spin box and convert it to B
type = self.thermal_factor_type
if type == 'U':
multiplier = U_TO_B
elif type == 'B':
multiplier = 1
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def thermal_factor(self, atom):
# Given an atom, return the thermal factor in either B or U
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = U_TO_B
else:
raise Exception(f'Unknown type: {type}')
return atom['U'] * multiplier
@property
def atom_types(self):
return [x['symbol'] for x in self.site['atoms']]
@atom_types.setter
def atom_types(self, v):
if v == self.atom_types:
# No changes needed...
return
# Reset all the occupancies
atoms = self.atoms
previous_u_values = {x['symbol']: x['U'] for x in atoms}
previous_charges = {x['symbol']: x['charge'] for x in atoms}
atoms.clear()
for symbol in v:
# Use previous values if available. Otherwise, use the defaults.
atom = {
'symbol': symbol,
'U': previous_u_values.get(symbol, DEFAULT_U),
'charge': previous_charges.get(symbol, DEFAULT_CHARGE),
}
atoms.append(atom)
self.reset_occupancies()
self.update_table()
self.emit_site_modified_if_valid()
@property
def num_rows(self):
return self.ui.table.rowCount()
@property
def selected_row(self):
selected = self.ui.table.selectionModel().selectedRows()
return selected[0].row() if selected else None
def select_row(self, i):
if i is None or i >= self.num_rows:
# Out of range. Don't do anything.
return
# Select the row
selection_model = self.ui.table.selectionModel()
selection_model.clearSelection()
model_index = selection_model.model().index(i, 0)
command = QItemSelectionModel.Select | QItemSelectionModel.Rows
selection_model.select(model_index, command)
def selection_changed(self):
self.update_enable_states()
def update_enable_states(self):
enable_remove = self.num_rows > 1 and self.selected_row is not None
self.ui.remove_atom_type.setEnabled(enable_remove)
def remove_selected_atom(self):
if self.selected_row is None:
return
atom_types = self.atom_types
del atom_types[self.selected_row]
self.atom_types = atom_types
def create_symbol_label(self, v):
w = QTableWidgetItem(v)
return w
def create_charge_combobox(self, charge, symbol):
cb = QComboBox(self.ui.table)
if charge not in chargestate[symbol]:
raise Exception(f'Invalid charge {charge} for {symbol}')
cb.addItems(chargestate[symbol])
cb.setCurrentText(charge)
cb.currentIndexChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
cb.setSizePolicy(size_policy)
self.charge_comboboxes.append(cb)
return cb
def create_occupancy_spinbox(self, v):
sb = ScientificDoubleSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(OCCUPATION_MIN)
sb.setMaximum(OCCUPATION_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.occupancy_spinboxes.append(sb)
return sb
def create_thermal_factor_spinbox(self, v):
sb = ThermalFactorSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(THERMAL_FACTOR_MIN)
sb.setMaximum(THERMAL_FACTOR_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
sb.setToolTip('Double-click to open tensor editor')
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.thermal_factor_spinboxes.append(sb)
return sb
def clear_table(self):
self.charge_comboboxes.clear()
self.occupancy_spinboxes.clear()
self.thermal_factor_spinboxes.clear()
self.ui.table.clearContents()
def update_gui(self):
with block_signals(*self.site_settings_widgets):
for i, w in enumerate(self.fractional_coords_widgets):
w.setValue(self.fractional_coords[i])
self.update_total_occupancy()
self.update_table()
self.reset_scalar_tensor_toggle()
def reset_scalar_tensor_toggle(self):
any_scalars = any(not isinstance(w.value(), np.ndarray)
for w in self.thermal_factor_spinboxes)
with block_signals(self.ui.convert_u_to_tensors):
self.ui.convert_u_to_tensors.setChecked(not any_scalars)
def update_table(self):
prev_selected = self.selected_row
block_list = [
self.ui.table,
self.ui.table.selectionModel()
]
with block_signals(*block_list):
atoms = self.site['atoms']
self.clear_table()
self.ui.table.setRowCount(len(atoms))
for i, atom in enumerate(atoms):
w = self.create_symbol_label(atom['symbol'])
self.ui.table.setItem(i, COLUMNS['symbol'], w)
w = self.create_charge_combobox(atom['charge'], atom['symbol'])
self.ui.table.setCellWidget(i, COLUMNS['charge'], w)
w = self.create_occupancy_spinbox(atom['occupancy'])
self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)
v = self.thermal_factor(atom)
w = self.create_thermal_factor_spinbox(v)
self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)
self.update_occupancy_validity()
if prev_selected is not None:
select_row = (prev_selected if prev_selected < self.num_rows
else self.num_rows - 1)
self.select_row(select_row)
# Just in case the selection actually changed...
self.selection_changed()
def thermal_factor_type_changed(self):
self.update_thermal_factor_header()
self.update_table()
# Update the text for the tensor toggle as well
text = f'Convert {self.thermal_factor_type} to tensors'
self.ui.convert_u_to_tensors.setText(text)
def update_thermal_factor_header(self):
w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])
w.setText(self.thermal_factor_type)
def update_config(self):
for i, w in enumerate(self.fractional_coords_widgets):
self.fractional_coords[i] = w.value()
for atom, combobox in zip(self.atoms, self.charge_comboboxes):
atom['charge'] = combobox.currentText()
for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
atom['occupancy'] = spinbox.value()
for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
atom['U'] = self.U(spinbox.value())
self.update_total_occupancy()
self.update_occupancy_validity()
self.emit_site_modified_if_valid()
def update_total_occupancy(self):
self.ui.total_occupancy.setValue(self.total_occupancy)
def reset_occupancies(self):
total = 1.0
atoms = self.atoms
num_atoms = len(atoms)
for atom in atoms:
atom['occupancy'] = total / num_atoms
self.update_total_occupancy()
self.update_occupancy_validity()
@property
def site_valid(self):
return self.occupancies_valid
@property
def occupancies_valid(self):
return self.total_occupancy <= 1.0
def update_occupancy_validity(self):
valid = self.occupancies_valid
color = 'white' if valid else 'red'
msg = '' if valid else 'Sum of occupancies must be <= 1'
self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')
self.ui.total_occupancy.setToolTip(msg)
def emit_site_modified_if_valid(self):
if not self.site_valid:
return
self.site_modified.emit()
@property
def fractional_coords_widgets(self):
return [
self.ui.coords_x,
self.ui.coords_y,
self.ui.coords_z
]
@property
def site_settings_widgets(self):
return self.fractional_coords_widgets
def convert_u_to_tensors(self, b):
def scalar_to_tensor(spinbox):
if isinstance(spinbox.value(), np.ndarray):
# Already a tensor
return
tensor = np.zeros(6, dtype=np.float64)
tensor[:3] = spinbox.value()
spinbox.setValue(tensor)
def tensor_to_scalar(spinbox):
value = spinbox.value()
if not isinstance(value, np.ndarray):
# Already a scalar
return
# Use the previous spinbox value if available
scalar = spinbox.editor.ui.scalar_value.value()
if (np.isclose(scalar, 0) and np.allclose(value[:3], value[0]) and
np.allclose(value[3:], 0)):
# If the previous value is zero, and the tensor is diagonal,
# use the diagonal value
scalar = value[0]
spinbox.setValue(scalar)
f = scalar_to_tensor if b else tensor_to_scalar
for spinbox in self.thermal_factor_spinboxes:
f(spinbox)
class ThermalFactorSpinBox(ScientificDoubleSpinBox):
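    # Spin box that holds either a scalar thermal factor or a 6-component
    # tensor; tensors are edited via ThermalFactorEditor, after which the
    # widget displays the literal text 'Tensor' and becomes read-only.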
def __init__(self, parent=None):
super().__init__(parent)
self.editor = ThermalFactorEditor(0, parent)
self.setLineEdit(ThermalFactorLineEdit(self, self))
self.valueChanged.connect(self.update_editor_value)
def value(self):
return self.editor.value
def setValue(self, v):
self.editor.value = v
if self.editor.is_tensor:
# Force an update
super().setValue(super().value())
self.valueChanged.emit(super().value())
self.setReadOnly(True)
else:
super().setValue(v)
self.valueChanged.emit(v)
self.setReadOnly(False)
def update_editor_value(self):
if not self.editor.is_tensor:
self.editor.value = super().value()
def textFromValue(self, value):
if not hasattr(self, 'editor') or not self.editor.is_tensor:
return super().textFromValue(value)
return 'Tensor'
def open_editor(self):
original = copy.deepcopy(self.editor.value)
if not self.editor.exec_():
self.editor.value = original
return
self.setValue(self.editor.value)
class ThermalFactorLineEdit(QLineEdit):
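    # Companion line edit: while read-only (tensor mode) a single click
    # reopens the tensor editor; otherwise a double-click does.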
def __init__(self, spinbox, parent=None):
super().__init__(parent)
self.spinbox = spinbox
def mousePressEvent(self, event):
if self.isReadOnly():
self.open_editor()
return
super().mousePressEvent(event)
def mouseDoubleClickEvent(self, event):
self.open_editor()
def open_editor(self):
self.spinbox.open_editor()
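

# A minimal usage sketch, assuming the hexrd.ui resources are importable and a
# `site` dict shaped like the one this editor reads (the 'Si' entry below is
# illustrative, not part of the original file):
if __name__ == '__main__':
    import sys
    from PySide2.QtWidgets import QApplication

    app = QApplication(sys.argv)
    site = {
        'fractional_coords': [0.0, 0.0, 0.0],
        'atoms': [{'symbol': 'Si', 'U': DEFAULT_U,
                   'charge': DEFAULT_CHARGE, 'occupancy': 1.0}],
    }
    editor = MaterialSiteEditor(site)
    editor.site_modified.connect(lambda: print('site modified'))
    editor.ui.show()
    sys.exit(app.exec_())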
|
flexible
|
{
"blob_id": "ec2be72f81d260c491cdc31b68b34401fb49b91e",
"index": 2660,
"step-1": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n <mask token>\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n <mask token>\n <mask token>\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n <mask token>\n\n def thermal_factor(self, atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n <mask token>\n <mask token>\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n <mask token>\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n 
sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n <mask token>\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n <mask token>\n <mask token>\n <mask token>\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n <mask token>\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n <mask token>\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n <mask token>\n\n @property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n <mask token>\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n 
self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-2": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n <mask token>\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n <mask token>\n <mask token>\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n <mask token>\n\n def thermal_factor(self, atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n <mask token>\n <mask token>\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n <mask token>\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n 
sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n <mask token>\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n <mask token>\n <mask token>\n\n def update_thermal_factor_header(self):\n w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n <mask token>\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n <mask token>\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n <mask token>\n\n @property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n <mask token>\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = 
copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-3": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n <mask token>\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n\n @property\n def atoms(self):\n return self.site['atoms']\n <mask token>\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def B(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = U_TO_B\n elif type == 'B':\n multiplier = 1\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def thermal_factor(self, atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n\n @property\n def atom_types(self):\n return [x['symbol'] for x in self.site['atoms']]\n\n @atom_types.setter\n def atom_types(self, v):\n if v == self.atom_types:\n return\n atoms = self.atoms\n previous_u_values = {x['symbol']: x['U'] for x in atoms}\n previous_charges = {x['symbol']: x['charge'] for x in atoms}\n atoms.clear()\n for symbol in v:\n atom = {'symbol': symbol, 'U': previous_u_values.get(symbol,\n DEFAULT_U), 'charge': previous_charges.get(symbol,\n DEFAULT_CHARGE)}\n atoms.append(atom)\n self.reset_occupancies()\n self.update_table()\n self.emit_site_modified_if_valid()\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n <mask token>\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n 
return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n <mask token>\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n <mask token>\n <mask token>\n\n def update_thermal_factor_header(self):\n w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n\n def update_total_occupancy(self):\n self.ui.total_occupancy.setValue(self.total_occupancy)\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n\n @property\n def occupancies_valid(self):\n return self.total_occupancy <= 1.0\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n <mask token>\n\n @property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def site_settings_widgets(self):\n return 
self.fractional_coords_widgets\n\n def convert_u_to_tensors(self, b):\n\n def scalar_to_tensor(spinbox):\n if isinstance(spinbox.value(), np.ndarray):\n return\n tensor = np.zeros(6, dtype=np.float64)\n tensor[:3] = spinbox.value()\n spinbox.setValue(tensor)\n\n def tensor_to_scalar(spinbox):\n value = spinbox.value()\n if not isinstance(value, np.ndarray):\n return\n scalar = spinbox.editor.ui.scalar_value.value()\n if np.isclose(scalar, 0) and np.allclose(value[:3], value[0]\n ) and np.allclose(value[3:], 0):\n scalar = value[0]\n spinbox.setValue(scalar)\n f = scalar_to_tensor if b else tensor_to_scalar\n for spinbox in self.thermal_factor_spinboxes:\n f(spinbox)\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-4": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n site_modified = Signal()\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n\n @property\n def atoms(self):\n return self.site['atoms']\n\n @property\n def total_occupancy(self):\n return sum(x['occupancy'] for x in self.atoms)\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def B(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = U_TO_B\n elif type == 'B':\n multiplier = 1\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def thermal_factor(self, atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n\n @property\n def atom_types(self):\n return [x['symbol'] for x in self.site['atoms']]\n\n @atom_types.setter\n def atom_types(self, v):\n if v == self.atom_types:\n return\n atoms = self.atoms\n previous_u_values = {x['symbol']: x['U'] for x in atoms}\n previous_charges = {x['symbol']: x['charge'] for x in atoms}\n atoms.clear()\n for symbol in v:\n atom = {'symbol': symbol, 'U': previous_u_values.get(symbol,\n DEFAULT_U), 'charge': previous_charges.get(symbol,\n DEFAULT_CHARGE)}\n atoms.append(atom)\n self.reset_occupancies()\n self.update_table()\n self.emit_site_modified_if_valid()\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n 
self.ui.remove_atom_type.setEnabled(enable_remove)\n\n def remove_selected_atom(self):\n if self.selected_row is None:\n return\n atom_types = self.atom_types\n del atom_types[self.selected_row]\n self.atom_types = atom_types\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n\n def clear_table(self):\n self.charge_comboboxes.clear()\n self.occupancy_spinboxes.clear()\n self.thermal_factor_spinboxes.clear()\n self.ui.table.clearContents()\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n\n def update_table(self):\n prev_selected = self.selected_row\n block_list = [self.ui.table, self.ui.table.selectionModel()]\n with block_signals(*block_list):\n atoms = self.site['atoms']\n self.clear_table()\n self.ui.table.setRowCount(len(atoms))\n for i, atom in enumerate(atoms):\n w = self.create_symbol_label(atom['symbol'])\n self.ui.table.setItem(i, COLUMNS['symbol'], w)\n w = self.create_charge_combobox(atom['charge'], atom['symbol'])\n self.ui.table.setCellWidget(i, COLUMNS['charge'], w)\n w = self.create_occupancy_spinbox(atom['occupancy'])\n self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)\n v = self.thermal_factor(atom)\n w = self.create_thermal_factor_spinbox(v)\n self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)\n self.update_occupancy_validity()\n if prev_selected is not None:\n select_row = (prev_selected if prev_selected < self.\n num_rows else self.num_rows - 1)\n self.select_row(select_row)\n self.selection_changed()\n\n def thermal_factor_type_changed(self):\n self.update_thermal_factor_header()\n self.update_table()\n text = f'Convert {self.thermal_factor_type} to tensors'\n self.ui.convert_u_to_tensors.setText(text)\n\n def update_thermal_factor_header(self):\n w = 
self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n\n def update_total_occupancy(self):\n self.ui.total_occupancy.setValue(self.total_occupancy)\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n\n @property\n def occupancies_valid(self):\n return self.total_occupancy <= 1.0\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n\n def emit_site_modified_if_valid(self):\n if not self.site_valid:\n return\n self.site_modified.emit()\n\n @property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n\n def convert_u_to_tensors(self, b):\n\n def scalar_to_tensor(spinbox):\n if isinstance(spinbox.value(), np.ndarray):\n return\n tensor = np.zeros(6, dtype=np.float64)\n tensor[:3] = spinbox.value()\n spinbox.setValue(tensor)\n\n def tensor_to_scalar(spinbox):\n value = spinbox.value()\n if not isinstance(value, np.ndarray):\n return\n scalar = spinbox.editor.ui.scalar_value.value()\n if np.isclose(scalar, 0) and np.allclose(value[:3], value[0]\n ) and np.allclose(value[3:], 0):\n scalar = value[0]\n spinbox.setValue(scalar)\n f = scalar_to_tensor if b else tensor_to_scalar\n for spinbox in self.thermal_factor_spinboxes:\n f(spinbox)\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def 
mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-5": "import copy\n\nimport numpy as np\n\nfrom PySide2.QtCore import QItemSelectionModel, QObject, Signal\nfrom PySide2.QtWidgets import (\n QComboBox, QLineEdit, QSizePolicy, QTableWidgetItem\n)\n\nfrom hexrd.constants import chargestate\nfrom hexrd.material import Material\n\nfrom hexrd.ui.periodic_table_dialog import PeriodicTableDialog\nfrom hexrd.ui.scientificspinbox import ScientificDoubleSpinBox\nfrom hexrd.ui.thermal_factor_editor import ThermalFactorEditor\nfrom hexrd.ui.ui_loader import UiLoader\nfrom hexrd.ui.utils import block_signals\n\n\nCOLUMNS = {\n 'symbol': 0,\n 'charge': 1,\n 'occupancy': 2,\n 'thermal_factor': 3\n}\n\nDEFAULT_CHARGE = '0'\nDEFAULT_U = Material.DFLT_U[0]\n\nOCCUPATION_MIN = 0\nOCCUPATION_MAX = 1\n\nTHERMAL_FACTOR_MIN = -1.e7\nTHERMAL_FACTOR_MAX = 1.e7\n\nU_TO_B = 8 * np.pi ** 2\nB_TO_U = 1 / U_TO_B\n\n\nclass MaterialSiteEditor(QObject):\n\n site_modified = Signal()\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n\n self._site = site\n\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n\n self.update_gui()\n\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(\n self.thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(\n self.selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n\n @property\n def atoms(self):\n return self.site['atoms']\n\n @property\n def total_occupancy(self):\n return sum(x['occupancy'] for x in self.atoms)\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n # Take a thermal factor from a spin box and convert it to U\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n\n return val * multiplier\n\n def B(self, val):\n # Take a thermal factor from a spin box and convert it to B\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = U_TO_B\n elif type == 'B':\n multiplier = 1\n else:\n raise Exception(f'Unknown type: {type}')\n\n return val * multiplier\n\n def thermal_factor(self, atom):\n # Given an atom, return the thermal factor in either B or U\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n\n return atom['U'] * multiplier\n\n @property\n def atom_types(self):\n return [x['symbol'] for x in self.site['atoms']]\n\n @atom_types.setter\n def atom_types(self, v):\n if v == self.atom_types:\n # No changes needed...\n return\n\n # Reset all the occupancies\n atoms = self.atoms\n previous_u_values = {x['symbol']: x['U'] for x in atoms}\n 
previous_charges = {x['symbol']: x['charge'] for x in atoms}\n atoms.clear()\n\n for symbol in v:\n # Use previous values if available. Otherwise, use the defaults.\n atom = {\n 'symbol': symbol,\n 'U': previous_u_values.get(symbol, DEFAULT_U),\n 'charge': previous_charges.get(symbol, DEFAULT_CHARGE),\n }\n atoms.append(atom)\n\n self.reset_occupancies()\n self.update_table()\n self.emit_site_modified_if_valid()\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n # Out of range. Don't do anything.\n return\n\n # Select the row\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n\n def remove_selected_atom(self):\n if self.selected_row is None:\n return\n\n atom_types = self.atom_types\n del atom_types[self.selected_row]\n self.atom_types = atom_types\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n\n self.thermal_factor_spinboxes.append(sb)\n return sb\n\n def clear_table(self):\n self.charge_comboboxes.clear()\n self.occupancy_spinboxes.clear()\n self.thermal_factor_spinboxes.clear()\n self.ui.table.clearContents()\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray)\n for w in self.thermal_factor_spinboxes)\n\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n\n def 
update_table(self):\n prev_selected = self.selected_row\n\n block_list = [\n self.ui.table,\n self.ui.table.selectionModel()\n ]\n with block_signals(*block_list):\n atoms = self.site['atoms']\n self.clear_table()\n self.ui.table.setRowCount(len(atoms))\n for i, atom in enumerate(atoms):\n w = self.create_symbol_label(atom['symbol'])\n self.ui.table.setItem(i, COLUMNS['symbol'], w)\n\n w = self.create_charge_combobox(atom['charge'], atom['symbol'])\n self.ui.table.setCellWidget(i, COLUMNS['charge'], w)\n\n w = self.create_occupancy_spinbox(atom['occupancy'])\n self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)\n\n v = self.thermal_factor(atom)\n w = self.create_thermal_factor_spinbox(v)\n self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)\n\n self.update_occupancy_validity()\n\n if prev_selected is not None:\n select_row = (prev_selected if prev_selected < self.num_rows\n else self.num_rows - 1)\n self.select_row(select_row)\n\n # Just in case the selection actually changed...\n self.selection_changed()\n\n def thermal_factor_type_changed(self):\n self.update_thermal_factor_header()\n self.update_table()\n\n # Update the text for the tensor toggle as well\n text = f'Convert {self.thermal_factor_type} to tensors'\n self.ui.convert_u_to_tensors.setText(text)\n\n def update_thermal_factor_header(self):\n w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n self.emit_site_modified_if_valid()\n\n def update_total_occupancy(self):\n self.ui.total_occupancy.setValue(self.total_occupancy)\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n\n @property\n def occupancies_valid(self):\n return self.total_occupancy <= 1.0\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n\n def emit_site_modified_if_valid(self):\n if not self.site_valid:\n return\n\n self.site_modified.emit()\n\n @property\n def fractional_coords_widgets(self):\n return [\n self.ui.coords_x,\n self.ui.coords_y,\n self.ui.coords_z\n ]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n\n def convert_u_to_tensors(self, b):\n\n def scalar_to_tensor(spinbox):\n if isinstance(spinbox.value(), np.ndarray):\n # Already a tensor\n return\n\n tensor = np.zeros(6, dtype=np.float64)\n tensor[:3] = spinbox.value()\n spinbox.setValue(tensor)\n\n def tensor_to_scalar(spinbox):\n value = spinbox.value()\n if not isinstance(value, np.ndarray):\n # Already a scalar\n return\n\n # Use the previous spinbox value if available\n scalar = 
spinbox.editor.ui.scalar_value.value()\n if (np.isclose(scalar, 0) and np.allclose(value[:3], value[0]) and\n np.allclose(value[3:], 0)):\n # If the previous value is zero, and the tensor is diagonal,\n # use the diagonal value\n scalar = value[0]\n\n spinbox.setValue(scalar)\n\n f = scalar_to_tensor if b else tensor_to_scalar\n\n for spinbox in self.thermal_factor_spinboxes:\n f(spinbox)\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n # Force an update\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-ids": [
39,
40,
47,
54,
57
]
}
|
[
39,
40,
47,
54,
57
] |
import pandas as pd
import numpy as np
import difflib as dl
import sys
def get_close(x):
if len(x) == 0:
return ""
return x[0]
list_file = sys.argv[1]
rating_file = sys.argv[2]
output_file = sys.argv[3]
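# Read the plain-text movie list and the per-title ratings CSV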
movie_list = open(list_file).read().splitlines()
movie_data = pd.DataFrame({'movie': movie_list})
rating_data = pd.read_csv(rating_file)
rating_data['rating'] = rating_data['rating'].astype(str).astype(float)
rating_data['counts'] = pd.Series(1, index=rating_data.index)
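# A constant 'counts' column makes groupby-sum yield both the rating total and the number of ratings per title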
rating_data = rating_data.groupby(['title'])[['counts', 'rating']].sum().reset_index()
rating_data['average_rating'] = pd.Series(rating_data['rating']/rating_data['counts'], index=rating_data.index)
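# Fuzzy-match each listed movie to the closest rated title with difflib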
movie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)
movie_data['closed'] = movie_data['closed'].apply(lambda x: dl.get_close_matches(x, rating_data['title'], n=1))
movie_data['closed'] = movie_data['closed'].apply(get_close)
result = movie_data.set_index('closed').join(rating_data.set_index('title')).reset_index()
result['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2))
result = result.drop(['closed', 'rating', 'counts'], axis=1)
result = result.set_index('movie')
result.to_csv(output_file, sep=',', encoding='utf-8')
|
normal
|
{
"blob_id": "7a9515b1f8cc196eb7551137a1418d5a387e7fd3",
"index": 959,
"step-1": "<mask token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\n<mask token>\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-3": "<mask token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum(\n ).reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating'] /\n rating_data['counts'], index=rating_data.index)\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.\n get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')\n ).reset_index()\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2)\n )\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport difflib as dl\nimport sys\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum(\n ).reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating'] /\n rating_data['counts'], index=rating_data.index)\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.\n get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')\n ).reset_index()\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2)\n )\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport difflib as dl\nimport sys\n\ndef get_close(x):\n\tif len(x) == 0:\n\t\treturn \"\"\n\treturn x[0]\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\n\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum().reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating']/rating_data['counts'], index=rating_data.index)\n\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\n\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')).reset_index()\n\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2))\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\n\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import json
from aioredis import Redis
from aiologger.loggers.json import ExtendedLogRecord
from aiologger.handlers.base import Handler
from app.core import config
class RedisHandler(Handler):
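    """aiologger handler that appends JSON-formatted log records to a Redis list."""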
def __init__(
self,
redis_client,
key=f"{config.APP_NAME}-log",
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.key = key
self.redis_client: Redis = redis_client
@property
def initialized(self):
return not self.redis_client.closed
async def emit(self, record: ExtendedLogRecord) -> None:
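        # RPUSH appends the serialized record to the tail of the list at self.key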
await self.redis_client.rpush(self.key, self.format(record))
async def close(self) -> None:
self.redis_client.close()
await self.redis_client.wait_closed()
@staticmethod
def format(record: ExtendedLogRecord):
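        # Flatten the record into a JSON object; extra fields are merged in at the top level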
o = {
"msg": record.get_message(),
"logged_at": record.created,
"line_number": record.lineno,
"file": record.pathname,
"function": record.funcName,
"level": record.levelname,
"module": record.module,
"kwargs": record.args,
**record.extra,
}
return json.dumps(o, ensure_ascii=False)
|
normal
|
{
"blob_id": "fe581ca8176fed01309f0d852f72564863aa0895",
"index": 8413,
"step-1": "<mask token>\n\n\nclass RedisHandler(Handler):\n <mask token>\n <mask token>\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-2": "<mask token>\n\n\nclass RedisHandler(Handler):\n\n def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n <mask token>\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-3": "<mask token>\n\n\nclass RedisHandler(Handler):\n\n def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n\n @property\n def initialized(self):\n return not self.redis_client.closed\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-4": "import json\nfrom aioredis import Redis\nfrom aiologger.loggers.json import ExtendedLogRecord\nfrom aiologger.handlers.base import Handler\nfrom app.core import config\n\n\nclass RedisHandler(Handler):\n\n def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n\n @property\n def initialized(self):\n return not self.redis_client.closed\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-5": "import json\n\nfrom aioredis import Redis\nfrom aiologger.loggers.json import ExtendedLogRecord\nfrom aiologger.handlers.base import Handler\n\nfrom app.core import config\n\n\nclass RedisHandler(Handler):\n def __init__(\n self,\n redis_client,\n key=f\"{config.APP_NAME}-log\",\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n\n @property\n def initialized(self):\n return not self.redis_client.closed\n\n async def emit(self, record: ExtendedLogRecord) -> None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) -> None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {\n \"msg\": record.get_message(),\n \"logged_at\": record.created,\n \"line_number\": record.lineno,\n \"file\": record.pathname,\n \"function\": record.funcName,\n \"level\": record.levelname,\n \"module\": record.module,\n \"kwargs\": record.args,\n **record.extra,\n }\n return json.dumps(o, ensure_ascii=False)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#
#River Sheppard
#
#
from PIL import Image
if __name__ == "__main__":
scale = 768
# creating the new image in RGB mode
bitmap = Image.new("RGB", (scale, scale), "white")
# Allocating the storage for the image and
# loading the pixel data.
pix = bitmap.load()
    # parameters for the Julia set: every pixel iterates z -> z**2 + c
    # with this fixed constant c
c = complex(-0.585, 0.85)
move = 0.0
maxIter = 255
for x in range(scale):
for y in range(scale):
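            # map the pixel to a point of the complex plane (x in [-1.5, 1.5], y in [-1.0, 1.0])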
zx = 1.5*(x - scale/2)/(0.5*scale) + move
zy = 1.0*(y - scale/2)/(0.5*scale) + move
z = complex(zx,zy)
i = maxIter
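            # escape-time loop: count how quickly z leaves the radius-2 disc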
while abs(z*z) < 4 and i > 1:
z = z**2 + c
i -= 1
            # pack scaled copies of the iteration count into one integer,
            # which Pillow splits into the R, G and B channels
pix[x,y] = (i << 21) + (i << 10) + i*8
# to display the created fractal
bitmap.show()
|
normal
|
{
"blob_id": "507251113d80eaa3684081f7814470053b04dda9",
"index": 1436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n scale = 768\n bitmap = Image.new('RGB', (scale, scale), 'white')\n pix = bitmap.load()\n c = complex(-0.585, 0.85)\n move = 0.0\n maxIter = 255\n for x in range(scale):\n for y in range(scale):\n zx = 1.5 * (x - scale / 2) / (0.5 * scale) + move\n zy = 1.0 * (y - scale / 2) / (0.5 * scale) + move\n z = complex(zx, zy)\n i = maxIter\n while abs(z * z) < 4 and i > 1:\n z = z ** 2 + c\n i -= 1\n pix[x, y] = (i << 21) + (i << 10) + i * 8\n bitmap.show()\n",
"step-3": "from PIL import Image\nif __name__ == '__main__':\n scale = 768\n bitmap = Image.new('RGB', (scale, scale), 'white')\n pix = bitmap.load()\n c = complex(-0.585, 0.85)\n move = 0.0\n maxIter = 255\n for x in range(scale):\n for y in range(scale):\n zx = 1.5 * (x - scale / 2) / (0.5 * scale) + move\n zy = 1.0 * (y - scale / 2) / (0.5 * scale) + move\n z = complex(zx, zy)\n i = maxIter\n while abs(z * z) < 4 and i > 1:\n z = z ** 2 + c\n i -= 1\n pix[x, y] = (i << 21) + (i << 10) + i * 8\n bitmap.show()\n",
"step-4": "#\r\n#River Sheppard\r\n#\r\n#\r\n\r\nfrom PIL import Image\r\n\r\nif __name__ == \"__main__\":\r\n scale = 768\r\n \r\n # creating the new image in RGB mode\r\n bitmap = Image.new(\"RGB\", (scale, scale), \"white\")\r\n \r\n # Allocating the storage for the image and\r\n # loading the pixel data.\r\n pix = bitmap.load()\r\n \r\n # setting up the variables according to \r\n # the equation to create the fractal\r\n c = complex(-0.585, 0.85)\r\n move = 0.0\r\n maxIter = 255\r\n \r\n for x in range(scale):\r\n for y in range(scale):\r\n zx = 1.5*(x - scale/2)/(0.5*scale) + move\r\n zy = 1.0*(y - scale/2)/(0.5*scale) + move\r\n z = complex(zx,zy)\r\n i = maxIter\r\n while abs(z*z) < 4 and i > 1:\r\n z = z**2 + c\r\n i -= 1\r\n \r\n # convert byte to RGB (3 bytes), kinda \r\n # magic to get nice colors\r\n pix[x,y] = (i << 21) + (i << 10) + i*8\r\n \r\n # to display the created fractal\r\n bitmap.show()\r\n \r\n \r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on 11/03/2020
@author: [email protected]
"""
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar,
QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout)
from PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot
#append the relative location you want to import from
sys.path.append("../Instrument_Libraries")
from instrumentConfig import Instrument
# Windows groups taskbar entries by AppUserModelID; without an explicit ID the
# window inherits Python's, so the taskbar shows the Python icon instead of
# the Steam icon set below. Setting an arbitrary unique ID fixes that.
import ctypes
myappid = u'mycompany.myproduct.subproduct.version' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
class MainWindow(QWidget):
instrumentName = "Unitialized Instrument"
instrumentList = []
#Instrument Types is a dictionary
instrumentTypes = {}
instrumentKey = "Uninitialized Key"
def __init__(self):
super(MainWindow, self).__init__()
self.configInstrument = Instrument()
self.instrumentList = self.configInstrument.listInstruments()
self.instrumentTypes = self.configInstrument.listInstrumentTypes()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 500, 600)
self.setWindowTitle('Tektronix Channel Label Widget')
self.setWindowIcon(QIcon('Steam_icon_logo.gif'))
instrumentGroupBox = QGroupBox()
instrumentGrid = QGridLayout()
self.scopeComboBox = QComboBox()
for index in range (0, len(self.instrumentList)):
self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
instrumentGrid.addWidget(self.scopeComboBox, 0, 0)
self.initScopeButton = QPushButton('Initialize Scope', self)
self.initScopeButton.clicked[bool].connect(self.initScope)
instrumentGrid.addWidget(self.initScopeButton, 1, 0)
scopeLabel = QLabel(self)
scopeLabel.setText("Scope Type")
instrumentGrid.addWidget(scopeLabel, 2, 0)
self.scopeIDN = QLabel(self)
self.scopeIDN.setText(self.instrumentName)
instrumentGrid.addWidget(self.scopeIDN, 3, 0)
instrumentGroupBox.setLayout(instrumentGrid)
startButtonGroupBox = QGroupBox()
startButtonLayout = QHBoxLayout()
self.startStopButton = QPushButton('Test Scope Connection', self)
self.startStopButton.clicked[bool].connect(self.startStopTest)
self.startStopButton.setEnabled(False)
startButtonLayout.addWidget(self.startStopButton)
self.getScopeShot = QPushButton('Get Scope Shot', self)
pictureGroupBox = QGroupBox()
pictureLayout = QHBoxLayout()
self.pictLabel = QLabel(self)
pictureLayout.addWidget(self.pictLabel)
pictureGroupBox.setLayout(pictureLayout)
self.getScopeShot.clicked[bool].connect(self.scopeShot)
self.getScopeShot.setEnabled(False)
startButtonLayout.addWidget(self.getScopeShot)
startButtonGroupBox.setLayout(startButtonLayout)
grid = QGridLayout()
grid.addWidget(instrumentGroupBox, 0, 0)
grid.addWidget(startButtonGroupBox, 1, 0)
grid.addWidget(pictureGroupBox, 2, 0)
self.setLayout(grid)
self.show()
def initScope(self):
self.instrumentName = self.scopeComboBox.currentText()
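        # NOTE: the call below hard-codes the instrument address, overriding the
        # combo-box selection; restore the commented line to use self.instrumentName.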
# self.scope, self.scopeName = self.configInstrument.initInstrument(self.instrumentName)
self.scope, self.scopeName = self.configInstrument.initInstrument("172.18.18.24")
print ("Configured Scope: " + self.scopeName)
self.scopeIDN.setText(self.scopeName)
self.startStopButton.setEnabled(True)
self.getScopeShot.setEnabled(True)
def startStopTest(self):
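        # Exercise the connection: enable all four channels and their bandwidth limits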
self.scope.setState(1, "ON")
self.scope.setState(2, "ON")
self.scope.setState(3, "ON")
self.scope.setState(4, "ON")
self.scope.setBandwidth(1, "ON")
self.scope.setBandwidth(2, "ON")
self.scope.setBandwidth(3, "ON")
self.scope.setBandwidth(4, "ON")
#Siglent library hard codes trigger level to mV
self.scope.setEdgeTrigger(3, 50, "FALL")
def scopeShot(self):
print ("Get Scope Shot")
self.scope.clear()
print ("ReadIDN Returns: " + str(self.scope.readIDN()))
print ("next line")
self.scope.clear()
self.scope.scopeScreenCaptureCopyToPC("siglentImage.png")
# loading image
self.pixmap = QPixmap("siglentImage.png")
# adding image to label
self.pictLabel.setText("Image Here")
self.pictLabel.setPixmap(self.pixmap)
# Optional, resize label to image size
self.pictLabel.resize(self.pixmap.width(),
self.pixmap.height())
if __name__ == '__main__':
app = QCoreApplication.instance()
if app is None:
app = QApplication(sys.argv)
ex = MainWindow()
app.exec_()
|
normal
|
{
"blob_id": "33464f19c42d1a192792a73297f4d926df78ab71",
"index": 2906,
"step-1": "<mask token>\n\n\nclass MainWindow(QWidget):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n <mask token>\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainWindow(QWidget):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../Instrument_Libraries')\n<mask token>\nmyappid = u'mycompany.myproduct.subproduct.version'\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n\nclass MainWindow(QWidget):\n instrumentName = 'Unitialized Instrument'\n instrumentList = []\n instrumentTypes = {}\n instrumentKey = 'Uninitialized Key'\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), 
self.pixmap.height())\n\n\nif __name__ == '__main__':\n app = QCoreApplication.instance()\n if app is None:\n app = QApplication(sys.argv)\n ex = MainWindow()\n app.exec_()\n",
"step-4": "<mask token>\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar, QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout\nfrom PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot\nsys.path.append('../Instrument_Libraries')\nfrom instrumentConfig import Instrument\nimport ctypes\nmyappid = u'mycompany.myproduct.subproduct.version'\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n\nclass MainWindow(QWidget):\n instrumentName = 'Unitialized Instrument'\n instrumentList = []\n instrumentTypes = {}\n instrumentKey = 'Uninitialized Key'\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n 
self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\nif __name__ == '__main__':\n app = QCoreApplication.instance()\n if app is None:\n app = QApplication(sys.argv)\n ex = MainWindow()\n app.exec_()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 11/03/2020\r\n\r\n@author: [email protected]\r\n\"\"\"\r\nimport sys\r\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar,\r\n QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout)\r\nfrom PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot\r\n\r\n#append the relative location you want to import from\r\nsys.path.append(\"../Instrument_Libraries\")\r\nfrom instrumentConfig import Instrument\r\n \r\n#For some reason the following code needs to be here for the Steam icon to show on the taskbar.\r\n#Google code, don't know why.\r\nimport ctypes\r\nmyappid = u'mycompany.myproduct.subproduct.version' # arbitrary string\r\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) \r\n\r\nclass MainWindow(QWidget):\r\n\r\n instrumentName = \"Unitialized Instrument\"\r\n \r\n \r\n instrumentList = []\r\n #Instrument Types is a dictionary\r\n instrumentTypes = {}\r\n instrumentKey = \"Uninitialized Key\"\r\n \r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n \r\n self.configInstrument = Instrument()\r\n self.instrumentList = self.configInstrument.listInstruments()\r\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\r\n\r\n self.initUI()\r\n\r\n\r\n def initUI(self): \r\n \r\n self.setGeometry(300, 300, 500, 600)\r\n self.setWindowTitle('Tektronix Channel Label Widget')\r\n self.setWindowIcon(QIcon('Steam_icon_logo.gif')) \r\n \r\n instrumentGroupBox = QGroupBox()\r\n instrumentGrid = QGridLayout()\r\n \r\n self.scopeComboBox = QComboBox()\r\n for index in range (0, len(self.instrumentList)):\r\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip()) \r\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\r\n \r\n self.initScopeButton = QPushButton('Initialize Scope', self)\r\n self.initScopeButton.clicked[bool].connect(self.initScope)\r\n \r\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\r\n\r\n scopeLabel = QLabel(self)\r\n scopeLabel.setText(\"Scope Type\")\r\n instrumentGrid.addWidget(scopeLabel, 2, 0)\r\n\r\n self.scopeIDN = QLabel(self)\r\n self.scopeIDN.setText(self.instrumentName)\r\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\r\n \r\n instrumentGroupBox.setLayout(instrumentGrid)\r\n \r\n instrumentGroupBox.setLayout(instrumentGrid)\r\n\r\n startButtonGroupBox = QGroupBox()\r\n startButtonLayout = QHBoxLayout()\r\n self.startStopButton = QPushButton('Test Scope Connection', self)\r\n \r\n self.startStopButton.clicked[bool].connect(self.startStopTest)\r\n self.startStopButton.setEnabled(False)\r\n startButtonLayout.addWidget(self.startStopButton)\r\n\r\n\r\n self.getScopeShot = QPushButton('Get Scope Shot', self)\r\n \r\n\r\n pictureGroupBox = QGroupBox()\r\n pictureLayout = QHBoxLayout()\r\n self.pictLabel = QLabel(self)\r\n pictureLayout.addWidget(self.pictLabel)\r\n pictureGroupBox.setLayout(pictureLayout)\r\n\r\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\r\n self.getScopeShot.setEnabled(False)\r\n startButtonLayout.addWidget(self.getScopeShot)\r\n\r\n startButtonGroupBox.setLayout(startButtonLayout)\r\n\r\n grid = QGridLayout()\r\n grid.addWidget(instrumentGroupBox, 0, 0)\r\n grid.addWidget(startButtonGroupBox, 1, 0)\r\n grid.addWidget(pictureGroupBox, 2, 0)\r\n\r\n 
self.setLayout(grid)\r\n\r\n self.show()\r\n\r\n def initScope(self):\r\n \r\n self.instrumentName = self.scopeComboBox.currentText()\r\n \r\n # self.scope, self.scopeName = self.configInstrument.initInstrument(self.instrumentName)\r\n self.scope, self.scopeName = self.configInstrument.initInstrument(\"172.18.18.24\")\r\n \r\n print (\"Configured Scope: \" + self.scopeName)\r\n \r\n self.scopeIDN.setText(self.scopeName)\r\n\r\n self.startStopButton.setEnabled(True)\r\n self.getScopeShot.setEnabled(True)\r\n\r\n def startStopTest(self):\r\n \r\n self.scope.setState(1, \"ON\")\r\n self.scope.setState(2, \"ON\")\r\n self.scope.setState(3, \"ON\")\r\n self.scope.setState(4, \"ON\")\r\n \r\n self.scope.setBandwidth(1, \"ON\")\r\n self.scope.setBandwidth(2, \"ON\")\r\n self.scope.setBandwidth(3, \"ON\")\r\n self.scope.setBandwidth(4, \"ON\")\r\n \r\n #Siglent library hard codes trigger level to mV\r\n self.scope.setEdgeTrigger(3, 50, \"FALL\")\r\n \r\n def scopeShot(self):\r\n print (\"Get Scope Shot\")\r\n self.scope.clear()\r\n print (\"ReadIDN Returns: \" + str(self.scope.readIDN()))\r\n print (\"next line\")\r\n self.scope.clear()\r\n \r\n self.scope.scopeScreenCaptureCopyToPC(\"siglentImage.png\")\r\n \r\n # loading image \r\n self.pixmap = QPixmap(\"siglentImage.png\") \r\n \r\n # adding image to label \r\n self.pictLabel.setText(\"Image Here\") \r\n self.pictLabel.setPixmap(self.pixmap) \r\n \r\n # Optional, resize label to image size \r\n self.pictLabel.resize(self.pixmap.width(), \r\n self.pixmap.height()) \r\n \r\n \r\nif __name__ == '__main__':\r\n \r\n app = QCoreApplication.instance()\r\n if app is None:\r\n app = QApplication(sys.argv)\r\n ex = MainWindow()\r\n app.exec_() \r\n",
"step-ids": [
4,
6,
9,
10,
11
]
}
|
[
4,
6,
9,
10,
11
] |
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck
class FunctionAppDisallowCORS(BaseResourceNegativeValueCheck):
def __init__(self):
name = "Ensure function apps are not accessible from all regions"
id = "CKV_AZURE_62"
supported_resources = ['azurerm_function_app']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, missing_attribute_result=CheckResult.PASSED)
def get_inspected_key(self):
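        # path into the resource config: first site_config block -> first cors block -> allowed_origins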
return 'site_config/[0]/cors/[0]/allowed_origins'
def get_forbidden_values(self):
return [['*']]
check = FunctionAppDisallowCORS()
|
normal
|
{
"blob_id": "30c2d46d6587df3cbc3e83ecb7af787fcd86eb1f",
"index": 7067,
"step-1": "<mask token>\n\n\nclass FunctionAppDisallowCORS(BaseResourceNegativeValueCheck):\n\n def __init__(self):\n name = 'Ensure function apps are not accessible from all regions'\n id = 'CKV_AZURE_62'\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories,\n supported_resources=supported_resources,\n missing_attribute_result=CheckResult.PASSED)\n <mask token>\n\n def get_forbidden_values(self):\n return [['*']]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FunctionAppDisallowCORS(BaseResourceNegativeValueCheck):\n\n def __init__(self):\n name = 'Ensure function apps are not accessible from all regions'\n id = 'CKV_AZURE_62'\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories,\n supported_resources=supported_resources,\n missing_attribute_result=CheckResult.PASSED)\n\n def get_inspected_key(self):\n return 'site_config/[0]/cors/[0]/allowed_origins'\n\n def get_forbidden_values(self):\n return [['*']]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FunctionAppDisallowCORS(BaseResourceNegativeValueCheck):\n\n def __init__(self):\n name = 'Ensure function apps are not accessible from all regions'\n id = 'CKV_AZURE_62'\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories,\n supported_resources=supported_resources,\n missing_attribute_result=CheckResult.PASSED)\n\n def get_inspected_key(self):\n return 'site_config/[0]/cors/[0]/allowed_origins'\n\n def get_forbidden_values(self):\n return [['*']]\n\n\ncheck = FunctionAppDisallowCORS()\n",
"step-4": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\n\n\nclass FunctionAppDisallowCORS(BaseResourceNegativeValueCheck):\n\n def __init__(self):\n name = 'Ensure function apps are not accessible from all regions'\n id = 'CKV_AZURE_62'\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories,\n supported_resources=supported_resources,\n missing_attribute_result=CheckResult.PASSED)\n\n def get_inspected_key(self):\n return 'site_config/[0]/cors/[0]/allowed_origins'\n\n def get_forbidden_values(self):\n return [['*']]\n\n\ncheck = FunctionAppDisallowCORS()\n",
"step-5": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\n\n\nclass FunctionAppDisallowCORS(BaseResourceNegativeValueCheck):\n def __init__(self):\n name = \"Ensure function apps are not accessible from all regions\"\n id = \"CKV_AZURE_62\"\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, missing_attribute_result=CheckResult.PASSED)\n\n def get_inspected_key(self):\n return 'site_config/[0]/cors/[0]/allowed_origins'\n\n def get_forbidden_values(self):\n return [['*']]\n\n\ncheck = FunctionAppDisallowCORS()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:
m = len(board)
n = len(board[0])
ans = []
root = TrieNode()
def insert(word: str) ->None:
node = root
for c in word:
if c not in node.children:
node.children[c] = TrieNode()
node = node.children[c]
node.word = word
for word in words:
insert(word)
def dfs(i: int, j: int, node: TrieNode) ->None:
if i < 0 or i == m or j < 0 or j == n:
return
if board[i][j] == '*':
return
c = board[i][j]
if c not in node.children:
return
child = node.children[c]
if child.word:
ans.append(child.word)
child.word = None
board[i][j] = '*'
dfs(i + 1, j, child)
dfs(i - 1, j, child)
dfs(i, j + 1, child)
dfs(i, j - 1, child)
board[i][j] = c
for i in range(m):
for j in range(n):
dfs(i, j, root)
return ans
<|reserved_special_token_1|>
class TrieNode:
<|reserved_special_token_0|>
class Solution:
def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:
m = len(board)
n = len(board[0])
ans = []
root = TrieNode()
def insert(word: str) ->None:
node = root
for c in word:
if c not in node.children:
node.children[c] = TrieNode()
node = node.children[c]
node.word = word
for word in words:
insert(word)
def dfs(i: int, j: int, node: TrieNode) ->None:
if i < 0 or i == m or j < 0 or j == n:
return
if board[i][j] == '*':
return
c = board[i][j]
if c not in node.children:
return
child = node.children[c]
if child.word:
ans.append(child.word)
child.word = None
board[i][j] = '*'
dfs(i + 1, j, child)
dfs(i - 1, j, child)
dfs(i, j + 1, child)
dfs(i, j - 1, child)
board[i][j] = c
for i in range(m):
for j in range(n):
dfs(i, j, root)
return ans
<|reserved_special_token_1|>
class TrieNode:
def __init__(self):
self.children: Dict[str, TrieNode] = collections.defaultdict(TrieNode)
self.word: Optional[str] = None
class Solution:
def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:
m = len(board)
n = len(board[0])
ans = []
root = TrieNode()
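        # build a trie of all target words, then DFS from every board cell, pruning by trie membership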
def insert(word: str) ->None:
node = root
for c in word:
if c not in node.children:
node.children[c] = TrieNode()
node = node.children[c]
node.word = word
for word in words:
insert(word)
def dfs(i: int, j: int, node: TrieNode) ->None:
if i < 0 or i == m or j < 0 or j == n:
return
if board[i][j] == '*':
return
c = board[i][j]
if c not in node.children:
return
child = node.children[c]
if child.word:
ans.append(child.word)
child.word = None
board[i][j] = '*'
dfs(i + 1, j, child)
dfs(i - 1, j, child)
dfs(i, j + 1, child)
dfs(i, j - 1, child)
board[i][j] = c
for i in range(m):
for j in range(n):
dfs(i, j, root)
return ans
|
flexible
|
{
"blob_id": "f996dffcb9650663278ec1e31d9f88d50142f4ea",
"index": 4491,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:\n m = len(board)\n n = len(board[0])\n ans = []\n root = TrieNode()\n\n def insert(word: str) ->None:\n node = root\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode()\n node = node.children[c]\n node.word = word\n for word in words:\n insert(word)\n\n def dfs(i: int, j: int, node: TrieNode) ->None:\n if i < 0 or i == m or j < 0 or j == n:\n return\n if board[i][j] == '*':\n return\n c = board[i][j]\n if c not in node.children:\n return\n child = node.children[c]\n if child.word:\n ans.append(child.word)\n child.word = None\n board[i][j] = '*'\n dfs(i + 1, j, child)\n dfs(i - 1, j, child)\n dfs(i, j + 1, child)\n dfs(i, j - 1, child)\n board[i][j] = c\n for i in range(m):\n for j in range(n):\n dfs(i, j, root)\n return ans\n",
"step-3": "class TrieNode:\n <mask token>\n\n\nclass Solution:\n\n def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:\n m = len(board)\n n = len(board[0])\n ans = []\n root = TrieNode()\n\n def insert(word: str) ->None:\n node = root\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode()\n node = node.children[c]\n node.word = word\n for word in words:\n insert(word)\n\n def dfs(i: int, j: int, node: TrieNode) ->None:\n if i < 0 or i == m or j < 0 or j == n:\n return\n if board[i][j] == '*':\n return\n c = board[i][j]\n if c not in node.children:\n return\n child = node.children[c]\n if child.word:\n ans.append(child.word)\n child.word = None\n board[i][j] = '*'\n dfs(i + 1, j, child)\n dfs(i - 1, j, child)\n dfs(i, j + 1, child)\n dfs(i, j - 1, child)\n board[i][j] = c\n for i in range(m):\n for j in range(n):\n dfs(i, j, root)\n return ans\n",
"step-4": "class TrieNode:\n\n def __init__(self):\n self.children: Dict[str, TrieNode] = collections.defaultdict(TrieNode)\n self.word: Optional[str] = None\n\n\nclass Solution:\n\n def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:\n m = len(board)\n n = len(board[0])\n ans = []\n root = TrieNode()\n\n def insert(word: str) ->None:\n node = root\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode()\n node = node.children[c]\n node.word = word\n for word in words:\n insert(word)\n\n def dfs(i: int, j: int, node: TrieNode) ->None:\n if i < 0 or i == m or j < 0 or j == n:\n return\n if board[i][j] == '*':\n return\n c = board[i][j]\n if c not in node.children:\n return\n child = node.children[c]\n if child.word:\n ans.append(child.word)\n child.word = None\n board[i][j] = '*'\n dfs(i + 1, j, child)\n dfs(i - 1, j, child)\n dfs(i, j + 1, child)\n dfs(i, j - 1, child)\n board[i][j] = c\n for i in range(m):\n for j in range(n):\n dfs(i, j, root)\n return ans\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class YandexSearch(BaseEngine):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def next_url(self, soup):
if (regex := re.findall('"(/search/\\?[^>]+p=[^"]+)', str(soup))):
return self.base_url + regex[-1]
def parse_soup(self, soup):
for raw in soup.find_all('li', class_='serp-item'):
if (url := raw.a.get('href')):
yield url
def captcha(self, response):
return 'showcaptcha' in response.url
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class YandexSearch(BaseEngine):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_params(self, query, **params):
params['text'] = query
params['p'] = None
return params
def next_url(self, soup):
if (regex := re.findall('"(/search/\\?[^>]+p=[^"]+)', str(soup))):
return self.base_url + regex[-1]
def parse_soup(self, soup):
for raw in soup.find_all('li', class_='serp-item'):
if (url := raw.a.get('href')):
yield url
def captcha(self, response):
return 'showcaptcha' in response.url
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class YandexSearch(BaseEngine):
base_url = 'https://yandex.com'
search_url = 'https://yandex.com/search/'
def get_params(self, query, **params):
params['text'] = query
params['p'] = None
return params
def next_url(self, soup):
if (regex := re.findall('"(/search/\\?[^>]+p=[^"]+)', str(soup))):
return self.base_url + regex[-1]
def parse_soup(self, soup):
for raw in soup.find_all('li', class_='serp-item'):
if (url := raw.a.get('href')):
yield url
def captcha(self, response):
return 'showcaptcha' in response.url
<|reserved_special_token_1|>
from .base import BaseEngine
import re
class YandexSearch(BaseEngine):
base_url = 'https://yandex.com'
search_url = 'https://yandex.com/search/'
def get_params(self, query, **params):
params['text'] = query
params['p'] = None
return params
def next_url(self, soup):
if (regex := re.findall('"(/search/\\?[^>]+p=[^"]+)', str(soup))):
return self.base_url + regex[-1]
def parse_soup(self, soup):
for raw in soup.find_all('li', class_='serp-item'):
if (url := raw.a.get('href')):
yield url
def captcha(self, response):
return 'showcaptcha' in response.url
<|reserved_special_token_1|>
from .base import BaseEngine
import re
class YandexSearch(BaseEngine):
base_url = "https://yandex.com"
search_url = "https://yandex.com/search/"
def get_params(self, query, **params):
params["text"] = query
params["p"] = None
return params
def next_url(self, soup):
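        # the next-page link appears as a relative /search/?...p=N href; keep the last match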
if (regex := re.findall(r'"(/search/\?[^>]+p=[^"]+)', str(soup))):
return self.base_url + regex[-1]
def parse_soup(self, soup):
for raw in soup.find_all('li', class_="serp-item"):
if (url := raw.a.get("href")):
yield url
def captcha(self, response):
return "showcaptcha" in response.url
|
flexible
|
{
"blob_id": "0ec3ca0f952dbc09c7a7a3e746c0aeab28ee9834",
"index": 6498,
"step-1": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n <mask token>\n <mask token>\n <mask token>\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-2": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n <mask token>\n <mask token>\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-3": "<mask token>\n\n\nclass YandexSearch(BaseEngine):\n base_url = 'https://yandex.com'\n search_url = 'https://yandex.com/search/'\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-4": "from .base import BaseEngine\nimport re\n\n\nclass YandexSearch(BaseEngine):\n base_url = 'https://yandex.com'\n search_url = 'https://yandex.com/search/'\n\n def get_params(self, query, **params):\n params['text'] = query\n params['p'] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall('\"(/search/\\\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_='serp-item'):\n if (url := raw.a.get('href')):\n yield url\n\n def captcha(self, response):\n return 'showcaptcha' in response.url\n",
"step-5": "from .base import BaseEngine\nimport re\n\n\nclass YandexSearch(BaseEngine):\n base_url = \"https://yandex.com\"\n search_url = \"https://yandex.com/search/\"\n\n def get_params(self, query, **params):\n params[\"text\"] = query\n params[\"p\"] = None\n return params\n\n def next_url(self, soup):\n if (regex := re.findall(r'\"(/search/\\?[^>]+p=[^\"]+)', str(soup))):\n return self.base_url + regex[-1]\n\n def parse_soup(self, soup):\n for raw in soup.find_all('li', class_=\"serp-item\"):\n if (url := raw.a.get(\"href\")):\n yield url\n\n def captcha(self, response):\n return \"showcaptcha\" in response.url\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class deeplens_classifier(BaseKerasClassifier):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class deeplens_classifier(BaseKerasClassifier):
def _model_definition(self, net):
"""
Builds the architecture of the network
"""
print(net.shape)
print('resnet17_scp')
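        # three Conv2D -> BatchNorm -> LeakyReLU -> MaxPooling2D stages over channels-first input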
net = Conv2D(filters=128, kernel_size=5, activation=None, padding=
'same', data_format='channels_first', input_shape=(1, 100, 100))(
net)
net = BatchNormalization(axis=1)(net)
net = LeakyReLU()(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Conv2D(filters=64, kernel_size=3, activation=None, padding=
'same', data_format='channels_first')(net)
net = BatchNormalization(axis=1)(net)
net = LeakyReLU()(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Conv2D(filters=64, kernel_size=3, activation=None, padding=
'same', data_format='channels_first')(net)
net = BatchNormalization(axis=1)(net)
net = LeakyReLU()(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
return net
<|reserved_special_token_1|>
from HSTLens_base_classifier_resnet17_s import BaseKerasClassifier
from keras.layers import Activation, AveragePooling2D, MaxPooling2D
from keras.layers import Conv2D, ELU, Dropout, LeakyReLU
from keras.layers.normalization import BatchNormalization
class deeplens_classifier(BaseKerasClassifier):
def _model_definition(self, net):
"""
Builds the architecture of the network
"""
print(net.shape)
print('resnet17_scp')
net = Conv2D(filters=128, kernel_size=5, activation=None, padding=
'same', data_format='channels_first', input_shape=(1, 100, 100))(
net)
net = BatchNormalization(axis=1)(net)
net = LeakyReLU()(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Conv2D(filters=64, kernel_size=3, activation=None, padding=
'same', data_format='channels_first')(net)
net = BatchNormalization(axis=1)(net)
net = LeakyReLU()(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Conv2D(filters=64, kernel_size=3, activation=None, padding=
'same', data_format='channels_first')(net)
net = BatchNormalization(axis=1)(net)
net = LeakyReLU()(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
return net
<|reserved_special_token_1|>
# coding: utf-8
# In[2]:
from HSTLens_base_classifier_resnet17_s import BaseKerasClassifier
from keras.layers import Activation, AveragePooling2D, MaxPooling2D
from keras.layers import Conv2D, ELU, Dropout, LeakyReLU
from keras.layers.normalization import BatchNormalization
class deeplens_classifier(BaseKerasClassifier):
def _model_definition(self, net):
"""
Builds the architecture of the network
"""
# Input filtering and downsampling with max pooling
print(net.shape) #channels must be specified first otherwise keras assumes channels last
print('resnet17_scp')
net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same',
data_format="channels_first", input_shape=(1, 100, 100))(net)
net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels
net = LeakyReLU()(net)
net= MaxPooling2D(pool_size=(2,2))(net)
net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format="channels_first")(net)
net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels
net = LeakyReLU()(net)
net= MaxPooling2D(pool_size=(2,2))(net)
net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format="channels_first")(net)
net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels
net = LeakyReLU()(net)
net= MaxPooling2D(pool_size=(2,2))(net)
return net
# In[ ]:
|
flexible
|
{
"blob_id": "6bd47fb71a32b8383a75e72111d802008bc6bc68",
"index": 3350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass deeplens_classifier(BaseKerasClassifier):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass deeplens_classifier(BaseKerasClassifier):\n\n def _model_definition(self, net):\n \"\"\"\n Builds the architecture of the network\n \"\"\"\n print(net.shape)\n print('resnet17_scp')\n net = Conv2D(filters=128, kernel_size=5, activation=None, padding=\n 'same', data_format='channels_first', input_shape=(1, 100, 100))(\n net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n return net\n",
"step-4": "from HSTLens_base_classifier_resnet17_s import BaseKerasClassifier\nfrom keras.layers import Activation, AveragePooling2D, MaxPooling2D\nfrom keras.layers import Conv2D, ELU, Dropout, LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\n\n\nclass deeplens_classifier(BaseKerasClassifier):\n\n def _model_definition(self, net):\n \"\"\"\n Builds the architecture of the network\n \"\"\"\n print(net.shape)\n print('resnet17_scp')\n net = Conv2D(filters=128, kernel_size=5, activation=None, padding=\n 'same', data_format='channels_first', input_shape=(1, 100, 100))(\n net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n net = Conv2D(filters=64, kernel_size=3, activation=None, padding=\n 'same', data_format='channels_first')(net)\n net = BatchNormalization(axis=1)(net)\n net = LeakyReLU()(net)\n net = MaxPooling2D(pool_size=(2, 2))(net)\n return net\n",
"step-5": "\n# coding: utf-8\n\n# In[2]:\n\n\n\nfrom HSTLens_base_classifier_resnet17_s import BaseKerasClassifier\n\nfrom keras.layers import Activation, AveragePooling2D, MaxPooling2D\nfrom keras.layers import Conv2D, ELU, Dropout, LeakyReLU\n\nfrom keras.layers.normalization import BatchNormalization\n\nclass deeplens_classifier(BaseKerasClassifier):\n\n def _model_definition(self, net):\n \"\"\"\n Builds the architecture of the network\n \"\"\"\n \n # Input filtering and downsampling with max pooling\n print(net.shape) #channels must be specified first otherwise keras assumes channels last\n print('resnet17_scp')\n \n net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same', \n data_format=\"channels_first\", input_shape=(1, 100, 100))(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels \n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n\n\n \n return net\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright 2017-2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
from datetime import datetime, timedelta
from odoo import fields
from odoo.tests.common import TransactionCase
class TestCase(TransactionCase):
def setUp(self):
super(TestCase, self).setUp()
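        # a signup-enabled test event that starts tomorrow and runs for two weeks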
self.event = self.env["event.event"].create(
{
"name": "TestEvent",
"attendee_signup": True,
"create_partner": True,
"date_begin": fields.Datetime.to_string(
datetime.today() + timedelta(days=1)
),
"date_end": fields.Datetime.to_string(
datetime.today() + timedelta(days=15)
),
}
)
|
normal
|
{
"blob_id": "29ec576d1fe04108eeb03a5d1b167671d3004570",
"index": 4403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCase(TransactionCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCase(TransactionCase):\n\n def setUp(self):\n super(TestCase, self).setUp()\n self.event = self.env['event.event'].create({'name': 'TestEvent',\n 'attendee_signup': True, 'create_partner': True, 'date_begin':\n fields.Datetime.to_string(datetime.today() + timedelta(days=1)),\n 'date_end': fields.Datetime.to_string(datetime.today() +\n timedelta(days=15))})\n",
"step-4": "from datetime import datetime, timedelta\nfrom odoo import fields\nfrom odoo.tests.common import TransactionCase\n\n\nclass TestCase(TransactionCase):\n\n def setUp(self):\n super(TestCase, self).setUp()\n self.event = self.env['event.event'].create({'name': 'TestEvent',\n 'attendee_signup': True, 'create_partner': True, 'date_begin':\n fields.Datetime.to_string(datetime.today() + timedelta(days=1)),\n 'date_end': fields.Datetime.to_string(datetime.today() +\n timedelta(days=15))})\n",
"step-5": "# Copyright 2017-2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>\n# License MIT (https://opensource.org/licenses/MIT).\nfrom datetime import datetime, timedelta\n\nfrom odoo import fields\nfrom odoo.tests.common import TransactionCase\n\n\nclass TestCase(TransactionCase):\n def setUp(self):\n super(TestCase, self).setUp()\n self.event = self.env[\"event.event\"].create(\n {\n \"name\": \"TestEvent\",\n \"attendee_signup\": True,\n \"create_partner\": True,\n \"date_begin\": fields.Datetime.to_string(\n datetime.today() + timedelta(days=1)\n ),\n \"date_end\": fields.Datetime.to_string(\n datetime.today() + timedelta(days=15)\n ),\n }\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import csv
#ratings.csv must be in the same directory
skipped_header = False
with open("ratings.csv") as in_file:
csvreader = csv.reader(in_file)
#read each row of ratings.csv (userId,movieId,rating,timestamp)
with open("ratings_train.csv", 'w') as train_out:
with open("ratings_test.csv", 'w') as test_out:
for row in csvreader:
if not skipped_header:
skipped_header = True
continue
elif int(row[0]) <= 146541:
train_out.write(",".join(row[:-1]))
train_out.write("\n")
else: #rest of the data (16000 of them)
test_out.write(",".join(row[:-1]))
test_out.write("\n")
|
normal
|
{
"blob_id": "e48a6a84268a0fe64e90714bd32712665934fc39",
"index": 2223,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n",
"step-3": "<mask token>\nskipped_header = False\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n",
"step-4": "import csv\nskipped_header = False\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n",
"step-5": "import csv\r\n\r\n#ratings.csv must be in the same directory\r\n\r\nskipped_header = False\r\nwith open(\"ratings.csv\") as in_file:\r\n csvreader = csv.reader(in_file)\r\n\t#read each row of ratings.csv (userId,movieId,rating,timestamp)\r\n with open(\"ratings_train.csv\", 'w') as train_out:\r\n with open(\"ratings_test.csv\", 'w') as test_out:\r\n for row in csvreader:\r\n if not skipped_header:\r\n skipped_header = True\r\n continue\r\n elif int(row[0]) <= 146541:\r\n train_out.write(\",\".join(row[:-1]))\r\n train_out.write(\"\\n\")\r\n else: #rest of the data (16000 of them)\r\n test_out.write(\",\".join(row[:-1]))\r\n test_out.write(\"\\n\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def checkRaiz():
a = int(input("Informe o primeiro coeficiente: "))
b = int(input("Informe o segundo coeficiente: "))
c = int(input("Informe o terceiro coeficiente: "))
delta = (b*b) - (4*a*c)
if (delta < 0):
print("Não tem raiz real")
elif (delta == 0):
print("Existe uma raiz real")
else:
print("Existem duas raizes reais")
checkRaiz()
|
normal
|
{
"blob_id": "603a73a7cc0487fcabb527ebc21d44cb95817ecb",
"index": 5909,
"step-1": "<mask token>\n",
"step-2": "def checkRaiz():\n a = int(input('Informe o primeiro coeficiente: '))\n b = int(input('Informe o segundo coeficiente: '))\n c = int(input('Informe o terceiro coeficiente: '))\n delta = b * b - 4 * a * c\n if delta < 0:\n print('Não tem raiz real')\n elif delta == 0:\n print('Existe uma raiz real')\n else:\n print('Existem duas raizes reais')\n\n\n<mask token>\n",
"step-3": "def checkRaiz():\n a = int(input('Informe o primeiro coeficiente: '))\n b = int(input('Informe o segundo coeficiente: '))\n c = int(input('Informe o terceiro coeficiente: '))\n delta = b * b - 4 * a * c\n if delta < 0:\n print('Não tem raiz real')\n elif delta == 0:\n print('Existe uma raiz real')\n else:\n print('Existem duas raizes reais')\n\n\ncheckRaiz()\n",
"step-4": "\ndef checkRaiz():\n a = int(input(\"Informe o primeiro coeficiente: \"))\n b = int(input(\"Informe o segundo coeficiente: \"))\n c = int(input(\"Informe o terceiro coeficiente: \"))\n\n delta = (b*b) - (4*a*c)\n\n if (delta < 0):\n print(\"Não tem raiz real\")\n elif (delta == 0):\n print(\"Existe uma raiz real\")\n else:\n print(\"Existem duas raizes reais\")\n\ncheckRaiz()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |